| Alternative 1 | |
|---|---|
| Error | 0.0 |
| Cost | 13184 |
\[0.5 \cdot \left(\mathsf{log1p}\left(x\right) - \mathsf{log1p}\left(-x\right)\right)\]
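Alternative 1 replaces the quotient inside the logarithm with two calls to `log1p`, which avoids the cancellation that makes the original form inaccurate for small `|x|`. A minimal sketch of that rewrite (using Python's standard `math.log1p`; the name `atanh_alt1` is only for illustration):

```python
import math

def atanh_alt1(x):
    # 0.5 * (log1p(x) - log1p(-x)) is mathematically equal to
    # 0.5 * log((1 + x) / (1 - x)), but log1p(x) stays accurate
    # when |x| is so small that 1.0 + x rounds away most of x.
    return 0.5 * (math.log1p(x) - math.log1p(-x))
```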
FPCore for the initial program (first) and the Taylor-expanded alternative (second); the same pairing is repeated in each language below:

```lisp
(FPCore (x) :precision binary64 (* (/ 1.0 2.0) (log (/ (+ 1.0 x) (- 1.0 x)))))
```

```lisp
(FPCore (x) :precision binary64 (* 0.5 (+ (* 0.2857142857142857 (pow x 7.0)) (+ (* x 2.0) (+ (* 0.6666666666666666 (pow x 3.0)) (* 0.4 (pow x 5.0)))))))
```
C:

```c
#include <math.h>

double code(double x) {
    return (1.0 / 2.0) * log(((1.0 + x) / (1.0 - x)));
}
```

```c
#include <math.h>

double code(double x) {
    return 0.5 * ((0.2857142857142857 * pow(x, 7.0)) + ((x * 2.0) + ((0.6666666666666666 * pow(x, 3.0)) + (0.4 * pow(x, 5.0)))));
}
```
Fortran:

```fortran
real(8) function code(x)
    real(8), intent (in) :: x
    code = (1.0d0 / 2.0d0) * log(((1.0d0 + x) / (1.0d0 - x)))
end function
```

```fortran
real(8) function code(x)
    real(8), intent (in) :: x
    code = 0.5d0 * ((0.2857142857142857d0 * (x ** 7.0d0)) + ((x * 2.0d0) + ((0.6666666666666666d0 * (x ** 3.0d0)) + (0.4d0 * (x ** 5.0d0)))))
end function
```
Java:

```java
public static double code(double x) {
    return (1.0 / 2.0) * Math.log(((1.0 + x) / (1.0 - x)));
}
```

```java
public static double code(double x) {
    return 0.5 * ((0.2857142857142857 * Math.pow(x, 7.0)) + ((x * 2.0) + ((0.6666666666666666 * Math.pow(x, 3.0)) + (0.4 * Math.pow(x, 5.0)))));
}
```
Python:

```python
import math

def code(x): return (1.0 / 2.0) * math.log(((1.0 + x) / (1.0 - x)))
```

```python
import math

def code(x): return 0.5 * ((0.2857142857142857 * math.pow(x, 7.0)) + ((x * 2.0) + ((0.6666666666666666 * math.pow(x, 3.0)) + (0.4 * math.pow(x, 5.0)))))
```
Julia:

```julia
function code(x)
    return Float64(Float64(1.0 / 2.0) * log(Float64(Float64(1.0 + x) / Float64(1.0 - x))))
end
```

```julia
function code(x)
    return Float64(0.5 * Float64(Float64(0.2857142857142857 * (x ^ 7.0)) + Float64(Float64(x * 2.0) + Float64(Float64(0.6666666666666666 * (x ^ 3.0)) + Float64(0.4 * (x ^ 5.0))))))
end
```
MATLAB:

```matlab
function tmp = code(x)
    tmp = (1.0 / 2.0) * log(((1.0 + x) / (1.0 - x)));
end
```

```matlab
function tmp = code(x)
    tmp = 0.5 * ((0.2857142857142857 * (x ^ 7.0)) + ((x * 2.0) + ((0.6666666666666666 * (x ^ 3.0)) + (0.4 * (x ^ 5.0)))));
end
```
Wolfram Language:

```mathematica
code[x_] := N[(N[(1.0 / 2.0), $MachinePrecision] * N[Log[N[(N[(1.0 + x), $MachinePrecision] / N[(1.0 - x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
```

```mathematica
code[x_] := N[(0.5 * N[(N[(0.2857142857142857 * N[Power[x, 7.0], $MachinePrecision]), $MachinePrecision] + N[(N[(x * 2.0), $MachinePrecision] + N[(N[(0.6666666666666666 * N[Power[x, 3.0], $MachinePrecision]), $MachinePrecision] + N[(0.4 * N[Power[x, 5.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
```
Mathematical notation:

\[\frac{1}{2} \cdot \log \left(\frac{1 + x}{1 - x}\right)\]

\[0.5 \cdot \left(0.2857142857142857 \cdot {x}^{7} + \left(x \cdot 2 + \left(0.6666666666666666 \cdot {x}^{3} + 0.4 \cdot {x}^{5}\right)\right)\right)\]
Results

| Step | Error |
|---|---|
| Initial program | 58.5 |
| Simplified | 0.0 |
| Taylor expanded in x around 0 | 0.2 |
| Final simplification | 0.2 |
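For reference, the Taylor step uses the Maclaurin series of the original expression (valid for |x| < 1); truncating after the degree-7 term and factoring out the leading 0.5 gives exactly the coefficients in the polynomial above (2/7 ≈ 0.2857…, 2/3 ≈ 0.6667, 2/5 = 0.4):

\[\frac{1}{2} \log \left(\frac{1 + x}{1 - x}\right) = x + \frac{x^{3}}{3} + \frac{x^{5}}{5} + \frac{x^{7}}{7} + \cdots = 0.5 \cdot \left(2x + \tfrac{2}{3} x^{3} + \tfrac{2}{5} x^{5} + \tfrac{2}{7} x^{7}\right) + O\left(x^{9}\right)\]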
| Alternative | Error | Cost |
|---|---|---|
| 1 | 0.0 | 13184 |
| 2 | 0.4 | 6976 |
| 3 | 0.4 | 832 |
| 4 | 0.7 | 320 |
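The error and cost figures above are Herbie's own measurements. As a rough, illustrative sanity check (not how Herbie evaluates error), one could compare the initial expression and the Taylor-expanded alternative against the standard-library `math.atanh`; the sample points below are arbitrary:

```python
import math

def initial(x):
    return (1.0 / 2.0) * math.log((1.0 + x) / (1.0 - x))

def taylor_alt(x):
    return 0.5 * (0.2857142857142857 * x**7 + x * 2.0
                  + 0.6666666666666666 * x**3 + 0.4 * x**5)

# Relative error of each version against math.atanh at a few points in (-1, 1).
for x in (1e-300, 1e-8, 0.1, 0.5, 0.9, 0.999999):
    ref = math.atanh(x)
    print(x,
          abs(initial(x) - ref) / abs(ref),
          abs(taylor_alt(x) - ref) / abs(ref))
```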

To reproduce this result, run:

```
herbie shell --seed 2022225
```

and enter:

```lisp
(FPCore (x)
  :name "Hyperbolic arc-(co)tangent"
  :precision binary64
  (* (/ 1.0 2.0) (log (/ (+ 1.0 x) (- 1.0 x)))))
```