
; atanh(x) expressed in closed form: (1/2) * log((1+x)/(1-x)), binary64.
(FPCore (x) :precision binary64 (* (/ 1.0 2.0) (log (/ (+ 1.0 x) (- 1.0 x)))))
double code(double x) {
return (1.0 / 2.0) * log(((1.0 + x) / (1.0 - x)));
}
!> atanh(x) via the closed form 0.5 * log((1+x)/(1-x)).
!> Fix: add `implicit none` (the original relied on implicit typing rules).
real(8) function code(x)
    implicit none
    real(8), intent (in) :: x
    ! (1.0d0/2.0d0) folds to exactly 0.5d0; written directly for clarity.
    code = 0.5d0 * log((1.0d0 + x) / (1.0d0 - x))
end function
/** Inverse hyperbolic tangent in closed form: half the log of (1+x)/(1-x). */
public static double code(double x) {
    final double ratio = (1.0 + x) / (1.0 - x);
    return 0.5 * Math.log(ratio);
}
def code(x):
    """Return atanh(x) via the closed form 0.5 * ln((1 + x) / (1 - x))."""
    ratio = (1.0 + x) / (1.0 - x)
    return 0.5 * math.log(ratio)
# atanh(x) via the closed form: half the log of (1 + x)/(1 - x).
# Float64 wrappers kept so each intermediate rounds exactly as the original.
function code(x)
    ratio = Float64(Float64(1.0 + x) / Float64(1.0 - x))
    return Float64(Float64(1.0 / 2.0) * log(ratio))
end
% atanh(x) via the closed form: half the log of (1 + x)/(1 - x).
function tmp = code(x)
    r = (1.0 + x) / (1.0 - x);
    tmp = 0.5 * log(r);
end
(* atanh(x) via the closed form (1/2) * Log[(1+x)/(1-x)]; every intermediate is rounded to $MachinePrecision to mimic binary64 evaluation. *)
code[x_] := N[(N[(1.0 / 2.0), $MachinePrecision] * N[Log[N[(N[(1.0 + x), $MachinePrecision] / N[(1.0 - x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{2} \cdot \log \left(\frac{1 + x}{1 - x}\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Starting expression, repeated for reference: (1/2) * log((1+x)/(1-x)).
(FPCore (x) :precision binary64 (* (/ 1.0 2.0) (log (/ (+ 1.0 x) (- 1.0 x)))))
double code(double x) {
return (1.0 / 2.0) * log(((1.0 + x) / (1.0 - x)));
}
!> Closed form of atanh(x): (1/2) * log((1+x)/(1-x)).
!> Fix: add `implicit none`, which the original omitted.
real(8) function code(x)
    implicit none
    real(8), intent (in) :: x
    code = 0.5d0 * log((1.0d0 + x) / (1.0d0 - x))
end function
/** Computes atanh(x) directly: 0.5 * ln((1 + x) / (1 - x)). */
public static double code(double x) {
    double num = 1.0 + x;
    double den = 1.0 - x;
    return 0.5 * Math.log(num / den);
}
def code(x):
    """Half the log-ratio (1 + x)/(1 - x): the inverse hyperbolic tangent."""
    return math.log((1.0 + x) / (1.0 - x)) / 2.0
# Closed form of atanh(x); Float64 coercions preserved so rounding matches
# the original expression step for step.
function code(x)
    num = Float64(1.0 + x)
    den = Float64(1.0 - x)
    return Float64(Float64(1.0 / 2.0) * log(Float64(num / den)))
end
% Closed form of atanh(x): half the log of the ratio (1+x)/(1-x).
function tmp = code(x)
    num = 1.0 + x;
    den = 1.0 - x;
    tmp = 0.5 * log(num / den);
end
(* Closed form of atanh(x), with every intermediate rounded to $MachinePrecision to mirror the binary64 FPCore above. *)
code[x_] := N[(N[(1.0 / 2.0), $MachinePrecision] * N[Log[N[(N[(1.0 + x), $MachinePrecision] / N[(1.0 - x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{2} \cdot \log \left(\frac{1 + x}{1 - x}\right)
\end{array}
; Alternative 1: degree-7 odd Taylor polynomial of atanh(x) about 0,
; with coefficients 1, 1/3, 1/5, 1/7 in Horner form.
(FPCore (x)
:precision binary64
(*
x
(+
1.0
(*
(pow x 2.0)
(+
0.3333333333333333
(* (* x x) (+ 0.2 (* (* x x) 0.14285714285714285))))))))
/* Degree-7 odd Taylor polynomial of atanh(x) about 0, in Horner form:
 * x * (1 + x^2 * (1/3 + x^2 * (1/5 + x^2 * (1/7)))).
 * Fix: pow(x, 2.0) is replaced by x * x — an exact square for every x,
 * cheaper than a general power call, and consistent with the (x * x)
 * factors already used for the higher terms. */
double code(double x) {
    double x2 = x * x;
    return x * (1.0 + x2 * (0.3333333333333333 + x2 * (0.2 + x2 * 0.14285714285714285)));
}
!> Degree-7 odd Taylor polynomial of atanh(x) about x = 0, Horner form.
!> Fixes: (1) `x ** 2.0d0` uses a REAL exponent, which the Fortran standard
!> defines via exp(2.0*log(x)) and therefore prohibits for x < 0; `x * x`
!> is defined for all x and is exact. (2) add the missing `implicit none`.
real(8) function code(x)
    implicit none
    real(8), intent (in) :: x
    real(8) :: x2
    x2 = x * x
    code = x * (1.0d0 + x2 * (0.3333333333333333d0 + x2 * (0.2d0 + x2 * 0.14285714285714285d0)))
end function
/**
 * Degree-7 odd Taylor polynomial of atanh(x) about 0, in Horner form.
 * Fix: Math.pow(x, 2.0) is replaced by x * x — the square is exact,
 * avoids a general power routine, and matches the (x * x) factors the
 * original already used for the higher-order terms.
 */
public static double code(double x) {
    double x2 = x * x;
    return x * (1.0 + x2 * (0.3333333333333333 + x2 * (0.2 + x2 * 0.14285714285714285)));
}
def code(x):
    """Degree-7 odd Taylor polynomial of atanh(x) about 0, Horner form.

    Coefficients are 1, 1/3, 1/5, 1/7. Fix: math.pow(x, 2.0) is replaced
    by x * x — the square is exact, skips a libm call, and matches the
    (x * x) factors the original already used for higher terms.
    """
    x2 = x * x
    return x * (1.0 + x2 * (0.3333333333333333 + x2 * (0.2 + x2 * 0.14285714285714285)))
# Degree-7 odd Taylor polynomial of atanh(x) about 0, in Horner form.
# Fix: `x ^ 2.0` (floating-point-exponent power) is replaced by `x * x`,
# which is exact, cheaper, and consistent with the other squared factors.
function code(x)
    x2 = Float64(x * x)
    return Float64(x * Float64(1.0 + Float64(x2 * Float64(0.3333333333333333 + Float64(x2 * Float64(0.2 + Float64(x2 * 0.14285714285714285)))))))
end
% Degree-7 odd Taylor polynomial of atanh(x) about 0, in Horner form.
% Fix: x ^ 2.0 is replaced by x * x — exact square, no general power call.
function tmp = code(x)
    x2 = x * x;
    tmp = x * (1.0 + x2 * (0.3333333333333333 + x2 * (0.2 + x2 * 0.14285714285714285)));
end
(* Degree-7 odd Taylor polynomial of atanh(x) about 0 in Horner form; each intermediate rounded to $MachinePrecision to mimic binary64. *)
code[x_] := N[(x * N[(1.0 + N[(N[Power[x, 2.0], $MachinePrecision] * N[(0.3333333333333333 + N[(N[(x * x), $MachinePrecision] * N[(0.2 + N[(N[(x * x), $MachinePrecision] * 0.14285714285714285), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(1 + {x}^{2} \cdot \left(0.3333333333333333 + \left(x \cdot x\right) \cdot \left(0.2 + \left(x \cdot x\right) \cdot 0.14285714285714285\right)\right)\right)
\end{array}
Initial program: 9.3%
metadata-eval: 9.3%
Simplified: 9.3%
Taylor expanded in x around 0: 100.0%
*-commutative: 100.0%
Simplified: 100.0%
unpow2: 100.0%
Applied egg-rr: 100.0%
unpow2: 100.0%
Applied egg-rr: 100.0%
; Alternative 2: degree-5 odd Taylor polynomial of atanh(x) about 0
; (coefficients 1, 1/3, 1/5) in Horner form.
(FPCore (x) :precision binary64 (* x (+ 1.0 (* (* x x) (+ 0.3333333333333333 (* (* x x) 0.2))))))
/* Degree-5 odd Taylor polynomial of atanh(x) about 0:
 * x * (1 + x^2/3 + x^4/5), evaluated in Horner form. */
double code(double x) {
    const double x2 = x * x;
    return x * (1.0 + x2 * (0.3333333333333333 + 0.2 * x2));
}
!> Degree-5 odd Taylor polynomial of atanh(x) about x = 0 (Horner form).
!> Fix: add the missing `implicit none`.
real(8) function code(x)
    implicit none
    real(8), intent (in) :: x
    real(8) :: x2
    x2 = x * x
    code = x * (1.0d0 + x2 * (0.3333333333333333d0 + x2 * 0.2d0))
end function
/** Degree-5 odd Taylor polynomial of atanh(x) about 0, in Horner form. */
public static double code(double x) {
    final double x2 = x * x;
    return x * (1.0 + x2 * (0.3333333333333333 + x2 * 0.2));
}
def code(x):
    """Degree-5 odd Taylor polynomial of atanh(x) about 0 (Horner form)."""
    x2 = x * x
    return x * (1.0 + x2 * (0.3333333333333333 + x2 * 0.2))
# Degree-5 odd Taylor polynomial of atanh(x) about 0, Horner form;
# Float64 coercions kept so rounding matches the one-line original.
function code(x)
    x2 = Float64(x * x)
    return Float64(x * Float64(1.0 + Float64(x2 * Float64(0.3333333333333333 + Float64(x2 * 0.2)))))
end
% Degree-5 odd Taylor polynomial of atanh(x) about 0, Horner form.
function tmp = code(x)
    x2 = x * x;
    tmp = x * (1.0 + x2 * (0.3333333333333333 + x2 * 0.2));
end
(* Degree-5 odd Taylor polynomial of atanh(x) about 0 in Horner form; each intermediate rounded to $MachinePrecision. *)
code[x_] := N[(x * N[(1.0 + N[(N[(x * x), $MachinePrecision] * N[(0.3333333333333333 + N[(N[(x * x), $MachinePrecision] * 0.2), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(1 + \left(x \cdot x\right) \cdot \left(0.3333333333333333 + \left(x \cdot x\right) \cdot 0.2\right)\right)
\end{array}
Initial program: 9.3%
metadata-eval: 9.3%
Simplified: 9.3%
Taylor expanded in x around 0: 99.9%
*-commutative: 99.9%
Simplified: 99.9%
unpow2: 100.0%
Applied egg-rr: 99.9%
unpow2: 100.0%
Applied egg-rr: 99.9%
; Alternative 3: first-order approximation atanh(x) ~ x near 0.
(FPCore (x) :precision binary64 x)
/* First-order Taylor approximation: atanh(x) ~ x near 0. */
double code(double x) {
return x;
}
!> First-order Taylor approximation: atanh(x) ~ x near 0.
!> Fix: add the missing `implicit none`.
real(8) function code(x)
    implicit none
    real(8), intent (in) :: x
    code = x
end function
/** First-order Taylor approximation: atanh(x) ~ x near 0. */
public static double code(double x) {
return x;
}
def code(x): return x  # first-order approximation: atanh(x) ~ x near 0
# First-order Taylor approximation: atanh(x) ~ x near 0.
function code(x) return x end
% First-order Taylor approximation: atanh(x) ~ x near 0.
function tmp = code(x) tmp = x; end
(* First-order Taylor approximation: atanh(x) ~ x near 0. *)
code[x_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program: 9.3%
metadata-eval: 9.3%
Simplified: 9.3%
Taylor expanded in x around 0: 98.6%
herbie shell --seed 2024130
; Named source expression reproduced from the Herbie shell session above.
(FPCore (x)
:name "Hyperbolic arc-(co)tangent"
:precision binary64
(* (/ 1.0 2.0) (log (/ (+ 1.0 x) (- 1.0 x)))))