
(FPCore (x) :precision binary64 (* 3.0 (+ (- (* (* x 3.0) x) (* x 4.0)) 1.0)))
/* 9x^2 - 12x + 3 in binary64, evaluated as 3*((3x*x - 4x) + 1).
 * Generated listing: the parenthesization fixes the rounding sequence
 * this variant was measured with -- do not reassociate. */
double code(double x) {
return 3.0 * ((((x * 3.0) * x) - (x * 4.0)) + 1.0);
}
! 9x^2 - 12x + 3 in binary64, as 3*((3x*x - 4x) + 1); parenthesization is deliberate (rounding).
real(8) function code(x)
real(8), intent (in) :: x
code = 3.0d0 * ((((x * 3.0d0) * x) - (x * 4.0d0)) + 1.0d0)
end function
// 9x^2 - 12x + 3, evaluated as 3*((3x*x - 4x) + 1); FP evaluation order is deliberate.
public static double code(double x) {
return 3.0 * ((((x * 3.0) * x) - (x * 4.0)) + 1.0);
}
def code(x):
    """Evaluate 9x^2 - 12x + 3 as 3*((3x*x - 4x) + 1), preserving FP order."""
    return 3.0 * ((((x * 3.0) * x) - (x * 4.0)) + 1.0)
# 9x^2 - 12x + 3; each Float64() wrapper pins the intermediate to binary64 rounding.
function code(x) return Float64(3.0 * Float64(Float64(Float64(Float64(x * 3.0) * x) - Float64(x * 4.0)) + 1.0)) end
% 9x^2 - 12x + 3, as 3*((3x*x - 4x) + 1); parenthesization is deliberate (rounding).
function tmp = code(x) tmp = 3.0 * ((((x * 3.0) * x) - (x * 4.0)) + 1.0); end
(* 9x^2 - 12x + 3; each N[..., $MachinePrecision] rounds the intermediate to machine precision. *)
code[x_] := N[(3.0 * N[(N[(N[(N[(x * 3.0), $MachinePrecision] * x), $MachinePrecision] - N[(x * 4.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
3 \cdot \left(\left(\left(x \cdot 3\right) \cdot x - x \cdot 4\right) + 1\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (* 3.0 (+ (- (* (* x 3.0) x) (* x 4.0)) 1.0)))
/* Initial program repeated as "alternative": 9x^2 - 12x + 3 as
 * 3*((3x*x - 4x) + 1). Parenthesization deliberate (binary64 rounding). */
double code(double x) {
return 3.0 * ((((x * 3.0) * x) - (x * 4.0)) + 1.0);
}
! 9x^2 - 12x + 3 in binary64, as 3*((3x*x - 4x) + 1); parenthesization is deliberate (rounding).
real(8) function code(x)
real(8), intent (in) :: x
code = 3.0d0 * ((((x * 3.0d0) * x) - (x * 4.0d0)) + 1.0d0)
end function
// 9x^2 - 12x + 3, evaluated as 3*((3x*x - 4x) + 1); FP evaluation order is deliberate.
public static double code(double x) {
return 3.0 * ((((x * 3.0) * x) - (x * 4.0)) + 1.0);
}
def code(x):
    """9x^2 - 12x + 3, computed as 3*((3x*x - 4x) + 1) (FP order preserved)."""
    return 3.0 * ((((x * 3.0) * x) - (x * 4.0)) + 1.0)
# 9x^2 - 12x + 3; each Float64() wrapper pins the intermediate to binary64 rounding.
function code(x) return Float64(3.0 * Float64(Float64(Float64(Float64(x * 3.0) * x) - Float64(x * 4.0)) + 1.0)) end
% 9x^2 - 12x + 3, as 3*((3x*x - 4x) + 1); parenthesization is deliberate (rounding).
function tmp = code(x) tmp = 3.0 * ((((x * 3.0) * x) - (x * 4.0)) + 1.0); end
(* 9x^2 - 12x + 3; each N[..., $MachinePrecision] rounds the intermediate to machine precision. *)
code[x_] := N[(3.0 * N[(N[(N[(N[(x * 3.0), $MachinePrecision] * x), $MachinePrecision] - N[(x * 4.0), $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
3 \cdot \left(\left(\left(x \cdot 3\right) \cdot x - x \cdot 4\right) + 1\right)
\end{array}
(FPCore (x) :precision binary64 (+ 3.0 (* x (* x (- 9.0 (/ 12.0 x))))))
/* 3 + x*(x*(9 - 12/x)) == 9x^2 - 12x + 3 away from x == 0; the division
 * makes x == 0 non-finite. Parenthesization deliberate (binary64 rounding). */
double code(double x) {
return 3.0 + (x * (x * (9.0 - (12.0 / x))));
}
! 3 + x*(x*(9 - 12/x)) ~ 9x^2 - 12x + 3; divides by x, so x = 0 is non-finite. Order deliberate.
real(8) function code(x)
real(8), intent (in) :: x
code = 3.0d0 + (x * (x * (9.0d0 - (12.0d0 / x))))
end function
// 3 + x*(x*(9 - 12/x)) ~ 9x^2 - 12x + 3; divides by x (x == 0 is non-finite). Order deliberate.
public static double code(double x) {
return 3.0 + (x * (x * (9.0 - (12.0 / x))));
}
def code(x):
    """Evaluate 3 + x*(x*(9 - 12/x)).

    Algebraically 9x^2 - 12x + 3 away from x = 0; the division makes
    x = 0 a non-finite case. FP evaluation order kept exactly.
    """
    return 3.0 + (x * (x * (9.0 - (12.0 / x))))
# 3 + x*(x*(9 - 12/x)) ~ 9x^2 - 12x + 3; Float64() wrappers pin each intermediate rounding.
function code(x) return Float64(3.0 + Float64(x * Float64(x * Float64(9.0 - Float64(12.0 / x))))) end
% 3 + x*(x*(9 - 12/x)) ~ 9x^2 - 12x + 3; divides by x (x == 0 non-finite). Order deliberate.
function tmp = code(x) tmp = 3.0 + (x * (x * (9.0 - (12.0 / x)))); end
(* 3 + x*(x*(9 - 12/x)); each N[..., $MachinePrecision] rounds the intermediate. *)
code[x_] := N[(3.0 + N[(x * N[(x * N[(9.0 - N[(12.0 / x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
3 + x \cdot \left(x \cdot \left(9 - \frac{12}{x}\right)\right)
\end{array}
Initial program 99.8%
Taylor expanded in x around 0 99.9%
Taylor expanded in x around inf 99.9%
associate-*r 99.9%
metadata-eval 99.9%
Simplified 99.9%
Final simplification 99.9%
(FPCore (x) :precision binary64 (+ 3.0 (* x (- (* x 9.0) 12.0))))
/* Horner form: 3 + x*(9x - 12) == 9x^2 - 12x + 3.
 * Parenthesization deliberate (binary64 rounding). */
double code(double x) {
return 3.0 + (x * ((x * 9.0) - 12.0));
}
! Horner form: 3 + x*(9x - 12) = 9x^2 - 12x + 3; parenthesization deliberate (rounding).
real(8) function code(x)
real(8), intent (in) :: x
code = 3.0d0 + (x * ((x * 9.0d0) - 12.0d0))
end function
// Horner form: 3 + x*(9x - 12) == 9x^2 - 12x + 3; FP evaluation order is deliberate.
public static double code(double x) {
return 3.0 + (x * ((x * 9.0) - 12.0));
}
def code(x):
    """Horner form 3 + x*(9x - 12) of 9x^2 - 12x + 3 (FP order preserved)."""
    return 3.0 + (x * ((x * 9.0) - 12.0))
# Horner form 3 + x*(9x - 12); Float64() wrappers pin each intermediate rounding.
function code(x) return Float64(3.0 + Float64(x * Float64(Float64(x * 9.0) - 12.0))) end
% Horner form: 3 + x*(9x - 12) = 9x^2 - 12x + 3; parenthesization deliberate (rounding).
function tmp = code(x) tmp = 3.0 + (x * ((x * 9.0) - 12.0)); end
(* Horner form 3 + x*(9x - 12); each N[..., $MachinePrecision] rounds the intermediate. *)
code[x_] := N[(3.0 + N[(x * N[(N[(x * 9.0), $MachinePrecision] - 12.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
3 + x \cdot \left(x \cdot 9 - 12\right)
\end{array}
Initial program 99.8%
Taylor expanded in x around 0 99.9%
Final simplification 99.9%
(FPCore (x) :precision binary64 (+ 3.0 (* x (* x 9.0))))
/* Truncated variant: 3 + 9x^2 (linear term dropped by the report's
 * series expansion). Parenthesization deliberate (binary64 rounding). */
double code(double x) {
return 3.0 + (x * (x * 9.0));
}
! Truncated variant: 3 + 9x^2 (linear term dropped); parenthesization deliberate (rounding).
real(8) function code(x)
real(8), intent (in) :: x
code = 3.0d0 + (x * (x * 9.0d0))
end function
// Truncated variant: 3 + 9x^2 (linear term dropped); FP evaluation order is deliberate.
public static double code(double x) {
return 3.0 + (x * (x * 9.0));
}
def code(x):
    """Truncated variant 3 + 9x^2 (linear term dropped; FP order preserved)."""
    return 3.0 + (x * (x * 9.0))
# Truncated variant 3 + 9x^2; Float64() wrappers pin each intermediate rounding.
function code(x) return Float64(3.0 + Float64(x * Float64(x * 9.0))) end
% Truncated variant: 3 + 9x^2 (linear term dropped); parenthesization deliberate (rounding).
function tmp = code(x) tmp = 3.0 + (x * (x * 9.0)); end
(* Truncated variant 3 + 9x^2; each N[..., $MachinePrecision] rounds the intermediate. *)
code[x_] := N[(3.0 + N[(x * N[(x * 9.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
3 + x \cdot \left(x \cdot 9\right)
\end{array}
Initial program 99.8%
Taylor expanded in x around 0 99.9%
Taylor expanded in x around inf 97.9%
*-commutative 97.9%
Simplified 97.9%
Final simplification 97.9%
(FPCore (x) :precision binary64 (+ 3.0 (* x -12.0)))
/* Linear variant: 3 - 12x (quadratic term dropped by the report's
 * series expansion around 0). */
double code(double x) {
return 3.0 + (x * -12.0);
}
! Linear variant: 3 - 12x (quadratic term dropped by expansion around 0).
real(8) function code(x)
real(8), intent (in) :: x
code = 3.0d0 + (x * (-12.0d0))
end function
// Linear variant: 3 - 12x (quadratic term dropped by expansion around 0).
public static double code(double x) {
return 3.0 + (x * -12.0);
}
def code(x):
    """Linear variant 3 - 12x (quadratic term dropped; FP order preserved)."""
    return 3.0 + (x * -12.0)
# Linear variant 3 - 12x; Float64() wrappers pin each intermediate rounding.
function code(x) return Float64(3.0 + Float64(x * -12.0)) end
% Linear variant: 3 - 12x (quadratic term dropped by expansion around 0).
function tmp = code(x) tmp = 3.0 + (x * -12.0); end
(* Linear variant 3 - 12x; N[..., $MachinePrecision] rounds the product. *)
code[x_] := N[(3.0 + N[(x * -12.0), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
3 + x \cdot -12
\end{array}
Initial program 99.8%
Taylor expanded in x around 0 55.7%
*-commutative 55.7%
Simplified 55.7%
Final simplification 55.7%
(FPCore (x) :precision binary64 3.0)
/* Constant variant: always 3.0; x is intentionally unused
 * (zeroth-order truncation of the original polynomial). */
double code(double x) {
return 3.0;
}
! Constant variant: always 3.0; x is intentionally unused (zeroth-order truncation).
real(8) function code(x)
real(8), intent (in) :: x
code = 3.0d0
end function
// Constant variant: always 3.0; x is intentionally unused (zeroth-order truncation).
public static double code(double x) {
return 3.0;
}
def code(x):
    """Constant variant: always 3.0; x is intentionally unused."""
    return 3.0
# Constant variant: always 3.0; x is intentionally unused (zeroth-order truncation).
function code(x) return 3.0 end
% Constant variant: always 3.0; x is intentionally unused (zeroth-order truncation).
function tmp = code(x) tmp = 3.0; end
(* Constant variant: always 3.0; x is intentionally unused. *)
code[x_] := 3.0
\begin{array}{l}
\\
3
\end{array}
Initial program 99.8%
Taylor expanded in x around 0 54.6%
Final simplification 54.6%
(FPCore (x) :precision binary64 (+ 3.0 (- (* (* 9.0 x) x) (* 12.0 x))))
/* Expanded form: 3 + (9x*x - 12x) == 9x^2 - 12x + 3.
 * Parenthesization deliberate (binary64 rounding). */
double code(double x) {
return 3.0 + (((9.0 * x) * x) - (12.0 * x));
}
! Expanded form: 3 + (9x*x - 12x) = 9x^2 - 12x + 3; parenthesization deliberate (rounding).
real(8) function code(x)
real(8), intent (in) :: x
code = 3.0d0 + (((9.0d0 * x) * x) - (12.0d0 * x))
end function
// Expanded form: 3 + (9x*x - 12x) == 9x^2 - 12x + 3; FP evaluation order is deliberate.
public static double code(double x) {
return 3.0 + (((9.0 * x) * x) - (12.0 * x));
}
def code(x):
    """Expanded form 3 + (9x*x - 12x) of 9x^2 - 12x + 3 (FP order preserved)."""
    return 3.0 + (((9.0 * x) * x) - (12.0 * x))
# Expanded form 3 + (9x*x - 12x); Float64() wrappers pin each intermediate rounding.
function code(x) return Float64(3.0 + Float64(Float64(Float64(9.0 * x) * x) - Float64(12.0 * x))) end
% Expanded form: 3 + (9x*x - 12x) = 9x^2 - 12x + 3; parenthesization deliberate (rounding).
function tmp = code(x) tmp = 3.0 + (((9.0 * x) * x) - (12.0 * x)); end
(* Expanded form 3 + (9x*x - 12x); each N[..., $MachinePrecision] rounds the intermediate. *)
code[x_] := N[(3.0 + N[(N[(N[(9.0 * x), $MachinePrecision] * x), $MachinePrecision] - N[(12.0 * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
3 + \left(\left(9 \cdot x\right) \cdot x - 12 \cdot x\right)
\end{array}
herbie shell --seed 2024071
(FPCore (x)
:name "Diagrams.Tangent:$catParam from diagrams-lib-1.3.0.3, D"
:precision binary64
:alt
(+ 3.0 (- (* (* 9.0 x) x) (* 12.0 x)))
(* 3.0 (+ (- (* (* x 3.0) x) (* x 4.0)) 1.0)))