
(FPCore (x) :precision binary64 (* (/ 1.0 2.0) (log (/ (+ 1.0 x) (- 1.0 x)))))
double code(double x) {
return (1.0 / 2.0) * log(((1.0 + x) / (1.0 - x)));
}
! atanh(x) via the identity atanh(x) = log((1+x)/(1-x)) / 2
real(8) function code(x)
real(8), intent (in) :: x
code = (1.0d0 / 2.0d0) * log(((1.0d0 + x) / (1.0d0 - x)))
end function
public static double code(double x) {
    // atanh(x) via the logarithmic identity atanh(x) = ln((1+x)/(1-x)) / 2.
    final double ratio = (1.0 + x) / (1.0 - x);
    return 0.5 * Math.log(ratio);
}
def code(x):
    """atanh(x) via the logarithmic identity atanh(x) = log((1+x)/(1-x)) / 2."""
    ratio = (1.0 + x) / (1.0 - x)
    return 0.5 * math.log(ratio)
# atanh(x) via the identity atanh(x) = log((1+x)/(1-x)) / 2
function code(x) return Float64(Float64(1.0 / 2.0) * log(Float64(Float64(1.0 + x) / Float64(1.0 - x)))) end
% atanh(x) via the identity atanh(x) = log((1+x)/(1-x)) / 2
function tmp = code(x) tmp = (1.0 / 2.0) * log(((1.0 + x) / (1.0 - x))); end
(* atanh(x) via the identity atanh(x) = Log[(1+x)/(1-x)] / 2 *)
code[x_] := N[(N[(1.0 / 2.0), $MachinePrecision] * N[Log[N[(N[(1.0 + x), $MachinePrecision] / N[(1.0 - x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{2} \cdot \log \left(\frac{1 + x}{1 - x}\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 4 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (* (/ 1.0 2.0) (log (/ (+ 1.0 x) (- 1.0 x)))))
/* Initial program: atanh(x) = log((1+x)/(1-x)) / 2 */
double code(double x) {
return (1.0 / 2.0) * log(((1.0 + x) / (1.0 - x)));
}
! Initial program: atanh(x) = log((1+x)/(1-x)) / 2
real(8) function code(x)
real(8), intent (in) :: x
code = (1.0d0 / 2.0d0) * log(((1.0d0 + x) / (1.0d0 - x)))
end function
// Initial program: atanh(x) = ln((1+x)/(1-x)) / 2
public static double code(double x) {
return (1.0 / 2.0) * Math.log(((1.0 + x) / (1.0 - x)));
}
# Initial program: atanh(x) = log((1+x)/(1-x)) / 2 (expects `math` imported at module level)
def code(x): return (1.0 / 2.0) * math.log(((1.0 + x) / (1.0 - x)))
# Initial program: atanh(x) = log((1+x)/(1-x)) / 2
function code(x) return Float64(Float64(1.0 / 2.0) * log(Float64(Float64(1.0 + x) / Float64(1.0 - x)))) end
% Initial program: atanh(x) = log((1+x)/(1-x)) / 2
function tmp = code(x) tmp = (1.0 / 2.0) * log(((1.0 + x) / (1.0 - x))); end
(* Initial program: atanh(x) = Log[(1+x)/(1-x)] / 2 *)
code[x_] := N[(N[(1.0 / 2.0), $MachinePrecision] * N[Log[N[(N[(1.0 + x), $MachinePrecision] / N[(1.0 - x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\frac{1}{2} \cdot \log \left(\frac{1 + x}{1 - x}\right)
\end{array}
(FPCore (x)
:precision binary64
(*
x
(+
1.0
(*
(* x x)
(+
0.3333333333333333
(* (* x x) (+ 0.2 (* x (* x 0.14285714285714285)))))))))
double code(double x) {
    /* Degree-7 Taylor polynomial of atanh(x) about 0:
       x + x^3/3 + x^5/5 + x^7/7, in the report's Horner-like grouping.
       Operation order matches the original exactly, so results are bit-identical. */
    double sq = x * x;
    double tail = 0.2 + (x * (x * 0.14285714285714285));
    double mid = 0.3333333333333333 + (sq * tail);
    return x * (1.0 + (sq * mid));
}
! Degree-7 Taylor polynomial of atanh(x) about 0: x + x**3/3 + x**5/5 + x**7/7
real(8) function code(x)
real(8), intent (in) :: x
code = x * (1.0d0 + ((x * x) * (0.3333333333333333d0 + ((x * x) * (0.2d0 + (x * (x * 0.14285714285714285d0)))))))
end function
public static double code(double x) {
    // Degree-7 Taylor polynomial of atanh(x) about 0: x + x^3/3 + x^5/5 + x^7/7.
    // Same floating-point operation order as the nested one-liner it replaces.
    final double sq = x * x;
    final double tail = 0.2 + (x * (x * 0.14285714285714285));
    final double mid = 0.3333333333333333 + (sq * tail);
    return x * (1.0 + (sq * mid));
}
def code(x):
    """Degree-7 Taylor polynomial of atanh(x) about 0: x + x^3/3 + x^5/5 + x^7/7.

    Keeps the original's floating-point operation order, so results match exactly.
    """
    sq = x * x
    tail = 0.2 + x * (x * 0.14285714285714285)
    mid = 0.3333333333333333 + sq * tail
    return x * (1.0 + sq * mid)
# Degree-7 Taylor polynomial of atanh(x) about 0: x + x^3/3 + x^5/5 + x^7/7
function code(x) return Float64(x * Float64(1.0 + Float64(Float64(x * x) * Float64(0.3333333333333333 + Float64(Float64(x * x) * Float64(0.2 + Float64(x * Float64(x * 0.14285714285714285)))))))) end
% Degree-7 Taylor polynomial of atanh(x) about 0: x + x^3/3 + x^5/5 + x^7/7
function tmp = code(x) tmp = x * (1.0 + ((x * x) * (0.3333333333333333 + ((x * x) * (0.2 + (x * (x * 0.14285714285714285))))))); end
(* Degree-7 Taylor polynomial of atanh(x) about 0: x + x^3/3 + x^5/5 + x^7/7 *)
code[x_] := N[(x * N[(1.0 + N[(N[(x * x), $MachinePrecision] * N[(0.3333333333333333 + N[(N[(x * x), $MachinePrecision] * N[(0.2 + N[(x * N[(x * 0.14285714285714285), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(1 + \left(x \cdot x\right) \cdot \left(0.3333333333333333 + \left(x \cdot x\right) \cdot \left(0.2 + x \cdot \left(x \cdot 0.14285714285714285\right)\right)\right)\right)
\end{array}
Initial program 7.8%
*-lowering-*.f64 N/A
metadata-eval N/A
log-lowering-log.f64 N/A
/-lowering-/.f64 N/A
+-lowering-+.f64 N/A
--lowering--.f64 7.8%
Simplified 7.8%
Taylor expanded in x around 0
*-lowering-*.f64 N/A
+-lowering-+.f64 N/A
*-lowering-*.f64 N/A
unpow2 N/A
*-lowering-*.f64 N/A
+-lowering-+.f64 N/A
*-lowering-*.f64 N/A
unpow2 N/A
*-lowering-*.f64 N/A
+-lowering-+.f64 N/A
*-commutative N/A
unpow2 N/A
associate-*l* N/A
*-lowering-*.f64 N/A
*-lowering-*.f64 99.8%
Simplified 99.8%
(FPCore (x) :precision binary64 (* x (+ 1.0 (* x (* x (+ 0.3333333333333333 (* x (* x 0.2))))))))
double code(double x) {
    /* Degree-5 Taylor polynomial of atanh(x) about 0: x + x^3/3 + x^5/5.
       Same floating-point operation order as the original nested expression. */
    double tail = 0.3333333333333333 + (x * (x * 0.2));
    return x * (1.0 + (x * (x * tail)));
}
! Degree-5 Taylor polynomial of atanh(x) about 0: x + x**3/3 + x**5/5
real(8) function code(x)
real(8), intent (in) :: x
code = x * (1.0d0 + (x * (x * (0.3333333333333333d0 + (x * (x * 0.2d0))))))
end function
public static double code(double x) {
    // Degree-5 Taylor polynomial of atanh(x) about 0: x + x^3/3 + x^5/5.
    final double tail = 0.3333333333333333 + (x * (x * 0.2));
    return x * (1.0 + (x * (x * tail)));
}
def code(x):
    """Degree-5 Taylor polynomial of atanh(x) about 0: x + x^3/3 + x^5/5."""
    tail = 0.3333333333333333 + x * (x * 0.2)
    return x * (1.0 + x * (x * tail))
# Degree-5 Taylor polynomial of atanh(x) about 0: x + x^3/3 + x^5/5
function code(x) return Float64(x * Float64(1.0 + Float64(x * Float64(x * Float64(0.3333333333333333 + Float64(x * Float64(x * 0.2))))))) end
% Degree-5 Taylor polynomial of atanh(x) about 0: x + x^3/3 + x^5/5
function tmp = code(x) tmp = x * (1.0 + (x * (x * (0.3333333333333333 + (x * (x * 0.2)))))); end
(* Degree-5 Taylor polynomial of atanh(x) about 0: x + x^3/3 + x^5/5 *)
code[x_] := N[(x * N[(1.0 + N[(x * N[(x * N[(0.3333333333333333 + N[(x * N[(x * 0.2), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(1 + x \cdot \left(x \cdot \left(0.3333333333333333 + x \cdot \left(x \cdot 0.2\right)\right)\right)\right)
\end{array}
Initial program 7.8%
*-lowering-*.f64 N/A
metadata-eval N/A
log-lowering-log.f64 N/A
/-lowering-/.f64 N/A
+-lowering-+.f64 N/A
--lowering--.f64 7.8%
Simplified 7.8%
Taylor expanded in x around 0
*-lowering-*.f64 N/A
+-lowering-+.f64 N/A
unpow2 N/A
associate-*l* N/A
*-commutative N/A
*-lowering-*.f64 N/A
*-commutative N/A
*-lowering-*.f64 N/A
+-lowering-+.f64 N/A
*-commutative N/A
unpow2 N/A
associate-*l* N/A
*-lowering-*.f64 N/A
*-lowering-*.f64 99.8%
Simplified 99.8%
(FPCore (x) :precision binary64 (* x (+ 1.0 (* x (* x 0.3333333333333333)))))
double code(double x) {
    /* Degree-3 Taylor polynomial of atanh(x) about 0: x + x^3/3.
       Same floating-point operation order as the original nested expression. */
    double cubic = x * (x * 0.3333333333333333);
    return x * (1.0 + cubic);
}
! Degree-3 Taylor polynomial of atanh(x) about 0: x + x**3/3
real(8) function code(x)
real(8), intent (in) :: x
code = x * (1.0d0 + (x * (x * 0.3333333333333333d0)))
end function
public static double code(double x) {
    // Degree-3 Taylor polynomial of atanh(x) about 0: x + x^3/3.
    final double cubic = x * (x * 0.3333333333333333);
    return x * (1.0 + cubic);
}
def code(x):
    """Degree-3 Taylor polynomial of atanh(x) about 0: x + x^3/3."""
    cubic = x * (x * 0.3333333333333333)
    return x * (1.0 + cubic)
# Degree-3 Taylor polynomial of atanh(x) about 0: x + x^3/3
function code(x) return Float64(x * Float64(1.0 + Float64(x * Float64(x * 0.3333333333333333)))) end
% Degree-3 Taylor polynomial of atanh(x) about 0: x + x^3/3
function tmp = code(x) tmp = x * (1.0 + (x * (x * 0.3333333333333333))); end
(* Degree-3 Taylor polynomial of atanh(x) about 0: x + x^3/3 *)
code[x_] := N[(x * N[(1.0 + N[(x * N[(x * 0.3333333333333333), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
x \cdot \left(1 + x \cdot \left(x \cdot 0.3333333333333333\right)\right)
\end{array}
Initial program 7.8%
*-lowering-*.f64 N/A
metadata-eval N/A
log-lowering-log.f64 N/A
/-lowering-/.f64 N/A
+-lowering-+.f64 N/A
--lowering--.f64 7.8%
Simplified 7.8%
Taylor expanded in x around 0
*-lowering-*.f64 N/A
+-lowering-+.f64 N/A
unpow2 N/A
associate-*r* N/A
*-commutative N/A
*-lowering-*.f64 N/A
*-commutative N/A
*-lowering-*.f64 99.7%
Simplified 99.7%
(FPCore (x) :precision binary64 x)
/* Degree-1 Taylor approximation of atanh(x) about 0: atanh(x) ~ x. */
double code(double x) {
return x;
}
! Degree-1 Taylor approximation of atanh(x) about 0: atanh(x) ~ x
real(8) function code(x)
real(8), intent (in) :: x
code = x
end function
// Degree-1 Taylor approximation of atanh(x) about 0: atanh(x) ~ x.
public static double code(double x) {
return x;
}
# Degree-1 Taylor approximation of atanh(x) about 0: atanh(x) ~ x
def code(x): return x
# Degree-1 Taylor approximation of atanh(x) about 0: atanh(x) ~ x
function code(x) return x end
% Degree-1 Taylor approximation of atanh(x) about 0: atanh(x) ~ x
function tmp = code(x) tmp = x; end
(* Degree-1 Taylor approximation of atanh(x) about 0: atanh(x) ~ x *)
code[x_] := x
\begin{array}{l}
\\
x
\end{array}
Initial program 7.8%
*-lowering-*.f64 N/A
metadata-eval N/A
log-lowering-log.f64 N/A
/-lowering-/.f64 N/A
+-lowering-+.f64 N/A
--lowering--.f64 7.8%
Simplified 7.8%
Taylor expanded in x around 0
Simplified 99.4%
herbie shell --seed 2024139
(FPCore (x)
:name "Hyperbolic arc-(co)tangent"
:precision binary64
(* (/ 1.0 2.0) (log (/ (+ 1.0 x) (- 1.0 x)))))