
; Original program: (x * 3) * x, evaluated in IEEE binary64.
(FPCore (x) :precision binary64 (* (* x 3.0) x))
/* Evaluates (x * 3.0) * x, keeping the left-to-right association
 * of the source expression. */
double code(double x) {
	const double scaled = x * 3.0;
	return scaled * x;
}
! Evaluates (x * 3) * x in double precision, preserving the
! left-to-right association of the source expression.
real(8) function code(x)
real(8), intent (in) :: x
code = (x * 3.0d0) * x
end function
/** Evaluates (x * 3.0) * x, left-to-right as in the source expression. */
public static double code(double x) {
	double scaled = x * 3.0;
	return scaled * x;
}
def code(x):
    """Return (x * 3.0) * x, keeping left-to-right evaluation order."""
    tripled = x * 3.0
    return tripled * x
# Computes Float64(Float64(x * 3.0) * x); the Float64 calls pin each
# intermediate to binary64, matching the FPCore precision annotation.
function code(x) return Float64(Float64(x * 3.0) * x) end
% Computes (x * 3.0) * x, left to right.
function tmp = code(x) tmp = (x * 3.0) * x; end
(* (x * 3.0) * x; each N[..., $MachinePrecision] rounds an intermediate,
   mimicking the binary64 evaluation of the source expression. *)
code[x_] := N[(N[(x * 3.0), $MachinePrecision] * x), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot 3\right) \cdot x
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Same expression as the original program: (x * 3) * x.
(FPCore (x) :precision binary64 (* (* x 3.0) x))
/* Evaluates (x * 3.0) * x, keeping the left-to-right association
 * of the source expression. */
double code(double x) {
	const double scaled = x * 3.0;
	return scaled * x;
}
! Evaluates (x * 3) * x in double precision, preserving the
! left-to-right association of the source expression.
real(8) function code(x)
real(8), intent (in) :: x
code = (x * 3.0d0) * x
end function
/** Evaluates (x * 3.0) * x, left-to-right as in the source expression. */
public static double code(double x) {
	double scaled = x * 3.0;
	return scaled * x;
}
def code(x):
    """Return (x * 3.0) * x, keeping left-to-right evaluation order."""
    tripled = x * 3.0
    return tripled * x
# Computes Float64(Float64(x * 3.0) * x); the Float64 calls pin each
# intermediate to binary64, matching the FPCore precision annotation.
function code(x) return Float64(Float64(x * 3.0) * x) end
% Computes (x * 3.0) * x, left to right.
function tmp = code(x) tmp = (x * 3.0) * x; end
(* (x * 3.0) * x; each N[..., $MachinePrecision] rounds an intermediate,
   mimicking the binary64 evaluation of the source expression. *)
code[x_] := N[(N[(x * 3.0), $MachinePrecision] * x), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot 3\right) \cdot x
\end{array}
; Reassociated form: (x * x) * 3.
(FPCore (x) :precision binary64 (* (* x x) 3.0))
/* Evaluates (x * x) * 3.0: the square is formed first, then scaled. */
double code(double x) {
	const double squared = x * x;
	return squared * 3.0;
}
! Evaluates (x * x) * 3 in double precision: the square is formed
! first, then scaled by 3.
real(8) function code(x)
real(8), intent (in) :: x
code = (x * x) * 3.0d0
end function
/** Evaluates (x * x) * 3.0: square first, then scale. */
public static double code(double x) {
	double squared = x * x;
	return squared * 3.0;
}
def code(x):
    """Return (x * x) * 3.0: square first, then scale."""
    squared = x * x
    return squared * 3.0
# Computes Float64(Float64(x * x) * 3.0); the Float64 calls pin each
# intermediate to binary64, matching the FPCore precision annotation.
function code(x) return Float64(Float64(x * x) * 3.0) end
% Computes (x * x) * 3.0: square first, then scale.
function tmp = code(x) tmp = (x * x) * 3.0; end
(* (x * x) * 3.0; each N[..., $MachinePrecision] rounds an intermediate,
   mimicking the binary64 evaluation of the source expression. *)
code[x_] := N[(N[(x * x), $MachinePrecision] * 3.0), $MachinePrecision]
\begin{array}{l}
\\
\left(x \cdot x\right) \cdot 3
\end{array}
Initial program 99.8%
Taylor expanded in x around 0
*-commutative N/A
lower-*.f64 N/A
unpow2 N/A
lower-*.f64 99.8
Applied rewrites 99.8%
; Simplified linear form: 3 * x.
(FPCore (x) :precision binary64 (* 3.0 x))
/* Evaluates the simplified linear form 3.0 * x. */
double code(double x) {
	const double result = 3.0 * x;
	return result;
}
! Evaluates the simplified linear form 3 * x in double precision.
real(8) function code(x)
real(8), intent (in) :: x
code = 3.0d0 * x
end function
/** Evaluates the simplified linear form 3.0 * x. */
public static double code(double x) {
	double result = 3.0 * x;
	return result;
}
def code(x):
    """Return 3.0 * x, the simplified linear form."""
    product = 3.0 * x
    return product
# Computes Float64(3.0 * x); the Float64 call pins the product to
# binary64, matching the FPCore precision annotation.
function code(x) return Float64(3.0 * x) end
% Computes the simplified linear form 3.0 * x.
function tmp = code(x) tmp = 3.0 * x; end
(* 3.0 * x, rounded to $MachinePrecision, mimicking binary64 evaluation. *)
code[x_] := N[(3.0 * x), $MachinePrecision]
\begin{array}{l}
\\
3 \cdot x
\end{array}
Initial program 99.8%
Applied rewrites 4.8%
; Constant approximation: always 3.
(FPCore (x) :precision binary64 3.0)
/* Constant approximation: always returns 3.0. */
double code(double x) {
	(void)x; /* argument intentionally unused */
	return 3.0;
}
! Constant approximation: always returns 3; the argument x is unused.
real(8) function code(x)
real(8), intent (in) :: x
code = 3.0d0
end function
/** Constant approximation: always returns 3.0; the argument is ignored. */
public static double code(double x) {
	final double constant = 3.0;
	return constant;
}
def code(x):
    """Return the constant 3.0; the argument is ignored."""
    return 3.0
# Constant approximation: always returns 3.0; the argument is ignored.
function code(x) return 3.0 end
% Constant approximation: always returns 3.0; the argument is ignored.
function tmp = code(x) tmp = 3.0; end
(* Constant approximation: always 3.0; the argument is ignored. *)
code[x_] := 3.0
\begin{array}{l}
\\
3
\end{array}
Initial program 99.8%
Applied rewrites 4.4%
herbie shell --seed 2024254
; Input FPCore as supplied to the Herbie shell (seed 2024254 above).
(FPCore (x)
:name "Diagrams.Tangent:$catParam from diagrams-lib-1.3.0.3, F"
:precision binary64
(* (* x 3.0) x))