
```
(FPCore (x) :precision binary64 (* (/ 1.0 2.0) (log (/ (+ 1.0 x) (- 1.0 x)))))
```

C:
```c
#include <math.h>

double code(double x) {
    return (1.0 / 2.0) * log((1.0 + x) / (1.0 - x));
}
```

Fortran:
```fortran
real(8) function code(x)
    real(8), intent (in) :: x
    code = (1.0d0 / 2.0d0) * log((1.0d0 + x) / (1.0d0 - x))
end function
```

Java:
```java
public static double code(double x) {
    return (1.0 / 2.0) * Math.log((1.0 + x) / (1.0 - x));
}
```

Python:
```python
import math

def code(x):
    return (1.0 / 2.0) * math.log((1.0 + x) / (1.0 - x))
```

Julia:
```julia
function code(x)
    return Float64(Float64(1.0 / 2.0) * log(Float64(Float64(1.0 + x) / Float64(1.0 - x))))
end
```

MATLAB:
```matlab
function tmp = code(x)
    tmp = (1.0 / 2.0) * log((1.0 + x) / (1.0 - x));
end
```

Mathematica:
```mathematica
code[x_] := N[(N[(1.0 / 2.0), $MachinePrecision] * N[Log[N[(N[(1.0 + x), $MachinePrecision] / N[(1.0 - x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
```

LaTeX:
\[
\frac{1}{2} \cdot \log \left(\frac{1 + x}{1 - x}\right)
\]
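This expression is $\operatorname{artanh}(x)$, and the direct form loses all significance for small inputs: once $|x|$ drops below about $10^{-16}$, both $1 + x$ and $1 - x$ round to exactly $1.0$ and the logarithm returns $0$. A minimal sketch of the failure (Python, using math.atanh as a reference; not part of the Herbie report):

```python
import math

def naive(x):
    # the original expression, evaluated in binary64
    return (1.0 / 2.0) * math.log((1.0 + x) / (1.0 - x))

x = 1e-20
print(naive(x))       # 0.0   -- 1.0 + 1e-20 rounds to 1.0, so log(1.0) = 0.0
print(math.atanh(x))  # 1e-20 -- the correctly rounded value of artanh(x)
```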
Outcomes were sampled in binary64 precision. Herbie found 7 alternatives:

| Alternative | Accuracy | Speedup |
|---|---|---|
| Alternative 1 | 100.0% | |
| Alternative 2 | 100.0% | |
| Alternative 3 | 99.5% | |
| Alternative 4 | 99.5% | |
| Alternative 5 | 99.4% | |
| Alternative 6 | 99.3% | |
| Alternative 7 | 98.8% | |

Accuracy figures are the final values from each alternative's derivation below; speedup figures are not recorded here.
Alternative 1

```
(FPCore (x) :precision binary64 (* 0.5 (- (* 2.0 (log1p x)) (log1p (* (- x) x)))))
```

C:
```c
#include <math.h>

double code(double x) {
    return 0.5 * ((2.0 * log1p(x)) - log1p(-x * x));
}
```

Java:
```java
public static double code(double x) {
    return 0.5 * ((2.0 * Math.log1p(x)) - Math.log1p(-x * x));
}
```

Python:
```python
import math

def code(x):
    return 0.5 * ((2.0 * math.log1p(x)) - math.log1p(-x * x))
```

Julia:
```julia
function code(x)
    return Float64(0.5 * Float64(Float64(2.0 * log1p(x)) - log1p(Float64(Float64(-x) * x))))
end
```

Mathematica:
```mathematica
code[x_] := N[(0.5 * N[(N[(2.0 * N[Log[1 + x], $MachinePrecision]), $MachinePrecision] - N[Log[1 + N[((-x) * x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
```

LaTeX:
\[
0.5 \cdot \left(2 \cdot \mathsf{log1p}\left(x\right) - \mathsf{log1p}\left(\left(-x\right) \cdot x\right)\right)
\]
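The rewrite is grounded in an identity that keeps every logarithm argument near 1, where log1p is accurate (a quick check, not part of the derivation log below):

\[
\log\frac{1+x}{1-x} = 2\log(1+x) - \log\bigl((1+x)(1-x)\bigr) = 2\,\mathsf{log1p}(x) - \mathsf{log1p}\!\left(-x^2\right)
\]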
Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 9.2% |
| Applied rewrites | 100.0% |
| lift-*.f64 | N/A |
| *-commutative | N/A |
| lower-*.f64 | 100.0% |
| lift-*.f64 | N/A |
| *-commutative | N/A |
| lower-*.f64 | 100.0% |
| lift-/.f64 | N/A |
| metadata-eval | 100.0% |
| Applied rewrites | 100.0% |
| Final simplification | 100.0% |
Alternative 2

```
(FPCore (x) :precision binary64 (* (- (log1p x) (log1p (- x))) 0.5))
```

C:
```c
#include <math.h>

double code(double x) {
    return (log1p(x) - log1p(-x)) * 0.5;
}
```

Java:
```java
public static double code(double x) {
    return (Math.log1p(x) - Math.log1p(-x)) * 0.5;
}
```

Python:
```python
import math

def code(x):
    return (math.log1p(x) - math.log1p(-x)) * 0.5
```

Julia:
```julia
function code(x)
    return Float64(Float64(log1p(x) - log1p(Float64(-x))) * 0.5)
end
```

Mathematica:
```mathematica
code[x_] := N[(N[(N[Log[1 + x], $MachinePrecision] - N[Log[1 + (-x)], $MachinePrecision]), $MachinePrecision] * 0.5), $MachinePrecision]
```

LaTeX:
\[
\left(\mathsf{log1p}\left(x\right) - \mathsf{log1p}\left(-x\right)\right) \cdot 0.5
\]
Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 9.2% |
| lift-/.f64 | N/A |
| metadata-eval | 9.2% |
| Applied rewrites | 9.2% |
| lift-*.f64 | N/A |
| *-commutative | N/A |
| lower-*.f64 | 9.2% |
| lift-log.f64 | N/A |
| lift-/.f64 | N/A |
| log-div | N/A |
| lift-+.f64 | N/A |
| lift-log1p.f64 | N/A |
| lower--.f64 | N/A |
| lift--.f64 | N/A |
| sub-neg | N/A |
| lift-neg.f64 | N/A |
| lower-log1p.f64 | 100.0% |
| Applied rewrites | 100.0% |
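Two named rules in this derivation do the real work: log-div splits the quotient into a difference of logarithms, and sub-neg rewrites $1 - x$ as $1 + (-x)$ so that both halves lower to log1p:

\[
\frac{1}{2}\log\frac{1+x}{1-x} = \frac{1}{2}\bigl(\log(1+x) - \log(1+(-x))\bigr) = \left(\mathsf{log1p}(x) - \mathsf{log1p}(-x)\right) \cdot 0.5
\]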
Alternative 3

```
(FPCore (x)
 :precision binary64
 (*
  (-
   (* 2.0 (log1p x))
   (*
    (*
     (fma
      (fma (fma (* -0.25 x) x -0.3333333333333333) (* x x) -0.5)
      (* x x)
      -1.0)
     x)
    x))
  0.5))
```

C:
```c
#include <math.h>

double code(double x) {
    return ((2.0 * log1p(x)) - ((fma(fma(fma(-0.25 * x, x, -0.3333333333333333), x * x, -0.5), x * x, -1.0) * x) * x)) * 0.5;
}
```

Julia:
```julia
function code(x)
    return Float64(Float64(Float64(2.0 * log1p(x)) - Float64(Float64(fma(fma(fma(Float64(-0.25 * x), x, -0.3333333333333333), Float64(x * x), -0.5), Float64(x * x), -1.0) * x) * x)) * 0.5)
end
```

Mathematica:
```mathematica
code[x_] := N[(N[(N[(2.0 * N[Log[1 + x], $MachinePrecision]), $MachinePrecision] - N[(N[(N[(N[(N[(N[(-0.25 * x), $MachinePrecision] * x + -0.3333333333333333), $MachinePrecision] * N[(x * x), $MachinePrecision] + -0.5), $MachinePrecision] * N[(x * x), $MachinePrecision] + -1.0), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision]), $MachinePrecision] * 0.5), $MachinePrecision]
```

LaTeX:
\[
\left(2 \cdot \mathsf{log1p}\left(x\right) - \left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.25 \cdot x, x, -0.3333333333333333\right), x \cdot x, -0.5\right), x \cdot x, -1\right) \cdot x\right) \cdot x\right) \cdot 0.5
\]
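The fma chain is a degree-8 Taylor polynomial standing in for the $\mathsf{log1p}(-x^2)$ term of Alternative 1; substituting $t = -x^2$ into the series for $\log(1+t)$ gives:

\[
\mathsf{log1p}\!\left(-x^2\right) = -x^2 - \frac{x^4}{2} - \frac{x^6}{3} - \frac{x^8}{4} + O\!\left(x^{10}\right)
\]

Reading the nested fmas inside out reproduces exactly these coefficients: $\bigl(((-0.25\,x^2 - \tfrac{1}{3})\,x^2 - 0.5)\,x^2 - 1\bigr)\,x \cdot x$.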
Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 9.2% |
| Applied rewrites | 100.0% |
| lift-*.f64 | N/A |
| *-commutative | N/A |
| lower-*.f64 | 100.0% |
| lift-*.f64 | N/A |
| *-commutative | N/A |
| lower-*.f64 | 100.0% |
| lift-/.f64 | N/A |
| metadata-eval | 100.0% |
| Applied rewrites | 100.0% |
| Taylor expanded in x around 0 | |
| *-commutative | N/A |
| unpow2 | N/A |
| associate-*r* | N/A |
| lower-*.f64 | N/A |
| Applied rewrites | 99.5% |
| Final simplification | 99.5% |
Alternative 4

```
(FPCore (x)
 :precision binary64
 (*
  (*
   (fma
    (fma (fma 0.2857142857142857 (* x x) 0.4) (* x x) 0.6666666666666666)
    (* x x)
    2.0)
   x)
  0.5))
```

C:
```c
#include <math.h>

double code(double x) {
    return (fma(fma(fma(0.2857142857142857, x * x, 0.4), x * x, 0.6666666666666666), x * x, 2.0) * x) * 0.5;
}
```

Julia:
```julia
function code(x)
    return Float64(Float64(fma(fma(fma(0.2857142857142857, Float64(x * x), 0.4), Float64(x * x), 0.6666666666666666), Float64(x * x), 2.0) * x) * 0.5)
end
```

Mathematica:
```mathematica
code[x_] := N[(N[(N[(N[(N[(0.2857142857142857 * N[(x * x), $MachinePrecision] + 0.4), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.6666666666666666), $MachinePrecision] * N[(x * x), $MachinePrecision] + 2.0), $MachinePrecision] * x), $MachinePrecision] * 0.5), $MachinePrecision]
```

LaTeX:
\[
\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.2857142857142857, x \cdot x, 0.4\right), x \cdot x, 0.6666666666666666\right), x \cdot x, 2\right) \cdot x\right) \cdot 0.5
\]
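Here the whole function is Taylor-expanded around $x = 0$. The constants are $2/7 \approx 0.2857142857142857$, $2/5 = 0.4$, and $2/3 \approx 0.6666666666666666$, i.e. the degree-7 series of $\operatorname{artanh}$ evaluated in Horner form:

\[
\frac{1}{2}\log\frac{1+x}{1-x} = \frac{x}{2}\left(2 + \frac{2}{3}x^2 + \frac{2}{5}x^4 + \frac{2}{7}x^6\right) + O\!\left(x^9\right) = x + \frac{x^3}{3} + \frac{x^5}{5} + \frac{x^7}{7} + O\!\left(x^9\right)
\]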
Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 9.2% |
| Applied rewrites | 100.0% |
| lift-*.f64 | N/A |
| *-commutative | N/A |
| lower-*.f64 | 100.0% |
| lift-*.f64 | N/A |
| *-commutative | N/A |
| lower-*.f64 | 100.0% |
| lift-/.f64 | N/A |
| metadata-eval | 100.0% |
| Applied rewrites | 100.0% |
| Taylor expanded in x around 0 | |
| *-commutative | N/A |
| lower-*.f64 | N/A |
| +-commutative | N/A |
| *-commutative | N/A |
| lower-fma.f64 | N/A |
| +-commutative | N/A |
| *-commutative | N/A |
| lower-fma.f64 | N/A |
| +-commutative | N/A |
| lower-fma.f64 | N/A |
| unpow2 | N/A |
| lower-*.f64 | N/A |
| unpow2 | N/A |
| lower-*.f64 | N/A |
| unpow2 | N/A |
| lower-*.f64 | 99.5% |
| Applied rewrites | 99.5% |
Alternative 5

```
(FPCore (x) :precision binary64 (* (* (fma (fma (* x x) 0.4 0.6666666666666666) (* x x) 2.0) x) 0.5))
```

C:
```c
#include <math.h>

double code(double x) {
    return (fma(fma(x * x, 0.4, 0.6666666666666666), x * x, 2.0) * x) * 0.5;
}
```

Julia:
```julia
function code(x)
    return Float64(Float64(fma(fma(Float64(x * x), 0.4, 0.6666666666666666), Float64(x * x), 2.0) * x) * 0.5)
end
```

Mathematica:
```mathematica
code[x_] := N[(N[(N[(N[(N[(x * x), $MachinePrecision] * 0.4 + 0.6666666666666666), $MachinePrecision] * N[(x * x), $MachinePrecision] + 2.0), $MachinePrecision] * x), $MachinePrecision] * 0.5), $MachinePrecision]
```

LaTeX:
\[
\left(\mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.4, 0.6666666666666666\right), x \cdot x, 2\right) \cdot x\right) \cdot 0.5
\]
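Alternative 5 truncates the same series one term earlier, at degree 5:

\[
\frac{x}{2}\left(2 + \frac{2}{3}x^2 + \frac{2}{5}x^4\right) = x + \frac{x^3}{3} + \frac{x^5}{5}
\]

Dropping the $x^7/7$ term costs about 0.1 percentage point of reported accuracy (99.5% to 99.4%) but removes one fma.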
Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 9.2% |
| Taylor expanded in x around 0 | |
| *-commutative | N/A |
| lower-*.f64 | N/A |
| +-commutative | N/A |
| *-commutative | N/A |
| lower-fma.f64 | N/A |
| +-commutative | N/A |
| lower-fma.f64 | N/A |
| unpow2 | N/A |
| lower-*.f64 | N/A |
| unpow2 | N/A |
| lower-*.f64 | 99.4% |
| Applied rewrites | 99.4% |
| lift-*.f64 | N/A |
| *-commutative | N/A |
| lower-*.f64 | 99.4% |
| lift-/.f64 | N/A |
| metadata-eval | N/A |
| Applied rewrites | 99.4% |
Alternative 6

```
(FPCore (x) :precision binary64 (fma (* (* x x) x) 0.3333333333333333 x))
```

C:
```c
#include <math.h>

double code(double x) {
    return fma((x * x) * x, 0.3333333333333333, x);
}
```

Julia:
```julia
function code(x)
    return fma(Float64(Float64(x * x) * x), 0.3333333333333333, x)
end
```

Mathematica:
```mathematica
code[x_] := N[(N[(N[(x * x), $MachinePrecision] * x), $MachinePrecision] * 0.3333333333333333 + x), $MachinePrecision]
```

LaTeX:
\[
\mathsf{fma}\left(\left(x \cdot x\right) \cdot x, 0.3333333333333333, x\right)
\]
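Only the cubic correction survives here; the cube-mult rewrite expresses $x^3$ as $(x \cdot x) \cdot x$, and the final multiply-add is then computed by a single fma with one rounding:

\[
\mathsf{fma}\!\left(x^3,\ \tfrac{1}{3},\ x\right) = x + \frac{x^3}{3}
\]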
Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 9.2% |
| Taylor expanded in x around 0 | |
| +-commutative | N/A |
| distribute-lft-in | N/A |
| *-commutative | N/A |
| associate-*r* | N/A |
| unpow2 | N/A |
| cube-mult | N/A |
| *-rgt-identity | N/A |
| lower-fma.f64 | N/A |
| lower-pow.f64 | 99.3% |
| Applied rewrites | 99.3% |
| Applied rewrites | 99.3% |
Alternative 7

```
(FPCore (x) :precision binary64 (* (* 2.0 x) 0.5))
```

C:
```c
double code(double x) {
    return (2.0 * x) * 0.5;
}
```

Fortran:
```fortran
real(8) function code(x)
    real(8), intent (in) :: x
    code = (2.0d0 * x) * 0.5d0
end function
```

Java:
```java
public static double code(double x) {
    return (2.0 * x) * 0.5;
}
```

Python:
```python
def code(x):
    return (2.0 * x) * 0.5
```

Julia:
```julia
function code(x)
    return Float64(Float64(2.0 * x) * 0.5)
end
```

MATLAB:
```matlab
function tmp = code(x)
    tmp = (2.0 * x) * 0.5;
end
```

Mathematica:
```mathematica
code[x_] := N[(N[(2.0 * x), $MachinePrecision] * 0.5), $MachinePrecision]
```

LaTeX:
\[
\left(2 \cdot x\right) \cdot 0.5
\]
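The last alternative is just the linear approximation $\operatorname{artanh}(x) \approx x$. Since $2$ and $0.5$ are powers of two, both multiplications are exact on the function's domain $|x| < 1$, so the expression returns $x$ bit-for-bit:

\[
(2 \cdot x) \cdot 0.5 = x \quad \text{exactly in binary64 for } |x| < 1
\]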
Derivation:

| Step | Accuracy |
|---|---|
| Initial program | 9.2% |
| Taylor expanded in x around 0 | |
| lower-*.f64 | 98.8% |
| Applied rewrites | 98.8% |
| lift-*.f64 | N/A |
| *-commutative | N/A |
| lower-*.f64 | 98.8% |
| lift-/.f64 | N/A |
| metadata-eval | N/A |
| Applied rewrites | 98.8% |
| Final simplification | 98.8% |
Reproduce:

```
herbie shell --seed 2024284
```

```
(FPCore (x)
  :name "Hyperbolic arc-(co)tangent"
  :precision binary64
  (* (/ 1.0 2.0) (log (/ (+ 1.0 x) (- 1.0 x)))))
```
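For a rough end-to-end check of the comparison above, the sketch below (plain Python, not Herbie's sampler; the helper names `naive` and `alternative2` are ours, and math.atanh serves as the reference) contrasts the relative error of the original program and Alternative 2 across input magnitudes:

```python
import math
import random

def naive(x):
    return (1.0 / 2.0) * math.log((1.0 + x) / (1.0 - x))

def alternative2(x):
    return (math.log1p(x) - math.log1p(-x)) * 0.5

random.seed(2024284)  # reusing the report's seed; any seed shows the same pattern
for _ in range(5):
    # spread samples over many magnitudes so the hard region near 0 appears
    x = random.uniform(-0.99, 0.99) * 10.0 ** random.randint(-18, 0)
    exact = math.atanh(x)
    err = lambda y: abs(y - exact) / abs(exact)
    print(f"x = {x:+.3e}   naive: {err(naive(x)):.1e}   alt 2: {err(alternative2(x)):.1e}")
```

For the tiny samples, `naive` returns 0.0 (relative error 1.0) while `alternative2` stays within a few ulps, matching the 9.2% versus 100.0% accuracies reported above.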