qlog (example 3.10)

Average Accuracy: 4.0% → 99.4%
Time: 8.4s
Precision: binary64
Cost: 576

Specification

\[-1 < x \land x < 1\]
\[\frac{\log \left(1 - x\right)}{\log \left(1 + x\right)} \]
\[\left(x \cdot \left(x \cdot -0.5\right) + -1\right) - x \]
; Original program (binary64): log(1 - x) / log(1 + x).
; NOTE(review): inaccurate near x = 0, where both logarithms approach zero
; — matches the 4.0% average accuracy reported above.
(FPCore (x) :precision binary64 (/ (log (- 1.0 x)) (log (+ 1.0 x))))
; Herbie's rewrite (binary64): the Taylor approximation -x^2/2 - 1 - x.
; The association is deliberate; do not re-group the operations.
(FPCore (x) :precision binary64 (- (+ (* x (* x -0.5)) -1.0) x))
// Original expression: log(1 - x) / log(1 + x).
// Inaccurate near x = 0, where both logarithms approach zero.
double code(double x) {
	return log((1.0 - x)) / log((1.0 + x));
}
// Herbie's rewrite: Taylor approximation -x*x/2 - 1 - x around x = 0.
// Keep the parenthesization exactly as written; it affects rounding.
double code(double x) {
	return ((x * (x * -0.5)) + -1.0) - x;
}
! Original expression: log(1 - x) / log(1 + x) in double precision.
! Inaccurate near x = 0, where both logarithms approach zero.
real(8) function code(x)
    real(8), intent (in) :: x
    code = log((1.0d0 - x)) / log((1.0d0 + x))
end function
! Herbie's rewrite: Taylor approximation -x*x/2 - 1 - x around x = 0.
! Keep the parenthesization exactly as written; it affects rounding.
real(8) function code(x)
    real(8), intent (in) :: x
    code = ((x * (x * (-0.5d0))) + (-1.0d0)) - x
end function
// Original expression: Math.log(1 - x) / Math.log(1 + x).
// Inaccurate near x = 0, where both logarithms approach zero.
public static double code(double x) {
	return Math.log((1.0 - x)) / Math.log((1.0 + x));
}
// Herbie's rewrite: Taylor approximation -x*x/2 - 1 - x around x = 0.
// Keep the parenthesization exactly as written; it affects rounding.
public static double code(double x) {
	return ((x * (x * -0.5)) + -1.0) - x;
}
def code(x):
	"""Original expression log(1 - x) / log(1 + x); inaccurate near x = 0."""
	return math.log((1.0 - x)) / math.log((1.0 + x))
def code(x):
	"""Herbie's rewrite: Taylor approximation -x*x/2 - 1 - x around x = 0.

	Keep the parenthesization exactly as written; it affects rounding.
	"""
	return ((x * (x * -0.5)) + -1.0) - x
# Original expression: log(1 - x) / log(1 + x); inaccurate near x = 0.
# The explicit Float64() calls round each intermediate to binary64.
function code(x)
	return Float64(log(Float64(1.0 - x)) / log(Float64(1.0 + x)))
end
# Herbie's rewrite: Taylor approximation -x*x/2 - 1 - x around x = 0.
# The explicit Float64() rounding of every intermediate mirrors binary64
# evaluation — do not simplify or re-associate.
function code(x)
	return Float64(Float64(Float64(x * Float64(x * -0.5)) + -1.0) - x)
end
% Original expression: log(1 - x) / log(1 + x); inaccurate near x = 0.
function tmp = code(x)
	tmp = log((1.0 - x)) / log((1.0 + x));
end
% Herbie's rewrite: Taylor approximation -x*x/2 - 1 - x around x = 0.
% Keep the parenthesization exactly as written; it affects rounding.
function tmp = code(x)
	tmp = ((x * (x * -0.5)) + -1.0) - x;
end
(* Original expression: Log[1 - x] / Log[1 + x], with each intermediate
   rounded to $MachinePrecision; inaccurate near x = 0. *)
code[x_] := N[(N[Log[N[(1.0 - x), $MachinePrecision]], $MachinePrecision] / N[Log[N[(1.0 + x), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
(* Herbie's rewrite: Taylor approximation -x^2/2 - 1 - x around x = 0,
   with each intermediate rounded to $MachinePrecision. Keep the grouping. *)
code[x_] := N[(N[(N[(x * N[(x * -0.5), $MachinePrecision]), $MachinePrecision] + -1.0), $MachinePrecision] - x), $MachinePrecision]
\frac{\log \left(1 - x\right)}{\log \left(1 + x\right)}
\left(x \cdot \left(x \cdot -0.5\right) + -1\right) - x

Error?

Try it out?

Your Program's Arguments

Results

Enter valid numbers for all inputs

Target

Original: 4.0%
Target: 99.6%
Herbie: 99.4%
\[-\left(\left(\left(1 + x\right) + \frac{x \cdot x}{2}\right) + 0.4166666666666667 \cdot {x}^{3}\right) \]

Derivation?

  1. Initial program 4.0%

    \[\frac{\log \left(1 - x\right)}{\log \left(1 + x\right)} \]
  2. Simplified 100.0%

    \[\leadsto \color{blue}{\frac{\mathsf{log1p}\left(-x\right)}{\mathsf{log1p}\left(x\right)}} \]
    Proof

    [Start] 4.0

    \[ \frac{\log \left(1 - x\right)}{\log \left(1 + x\right)} \]

    sub-neg [=>] 4.0

    \[ \frac{\log \color{blue}{\left(1 + \left(-x\right)\right)}}{\log \left(1 + x\right)} \]

    log1p-def [=>] 3.0

    \[ \frac{\color{blue}{\mathsf{log1p}\left(-x\right)}}{\log \left(1 + x\right)} \]

    log1p-def [=>] 100.0

    \[ \frac{\mathsf{log1p}\left(-x\right)}{\color{blue}{\mathsf{log1p}\left(x\right)}} \]
  3. Taylor expanded in x around 0 99.4%

    \[\leadsto \color{blue}{\left(-0.5 \cdot {x}^{2} + -1 \cdot x\right) - 1} \]
  4. Simplified 99.4%

    \[\leadsto \color{blue}{\mathsf{fma}\left(x, x \cdot -0.5, -1 - x\right)} \]
    Proof

    [Start] 99.4

    \[ \left(-0.5 \cdot {x}^{2} + -1 \cdot x\right) - 1 \]

    associate--l+ [=>] 99.4

    \[ \color{blue}{-0.5 \cdot {x}^{2} + \left(-1 \cdot x - 1\right)} \]

    *-commutative [=>] 99.4

    \[ \color{blue}{{x}^{2} \cdot -0.5} + \left(-1 \cdot x - 1\right) \]

    unpow2 [=>] 99.4

    \[ \color{blue}{\left(x \cdot x\right)} \cdot -0.5 + \left(-1 \cdot x - 1\right) \]

    associate-*l* [=>] 99.4

    \[ \color{blue}{x \cdot \left(x \cdot -0.5\right)} + \left(-1 \cdot x - 1\right) \]

    fma-def [=>] 99.4

    \[ \color{blue}{\mathsf{fma}\left(x, x \cdot -0.5, -1 \cdot x - 1\right)} \]

    sub-neg [=>] 99.4

    \[ \mathsf{fma}\left(x, x \cdot -0.5, \color{blue}{-1 \cdot x + \left(-1\right)}\right) \]

    metadata-eval [=>] 99.4

    \[ \mathsf{fma}\left(x, x \cdot -0.5, -1 \cdot x + \color{blue}{-1}\right) \]

    +-commutative [=>] 99.4

    \[ \mathsf{fma}\left(x, x \cdot -0.5, \color{blue}{-1 + -1 \cdot x}\right) \]

    mul-1-neg [=>] 99.4

    \[ \mathsf{fma}\left(x, x \cdot -0.5, -1 + \color{blue}{\left(-x\right)}\right) \]

    unsub-neg [=>] 99.4

    \[ \mathsf{fma}\left(x, x \cdot -0.5, \color{blue}{-1 - x}\right) \]
  5. Applied egg-rr 99.4%

    \[\leadsto \color{blue}{\left(x \cdot \left(x \cdot -0.5\right) + -1\right) - x} \]
    Proof

    [Start] 99.4

    \[ \mathsf{fma}\left(x, x \cdot -0.5, -1 - x\right) \]

    fma-udef [=>] 99.4

    \[ \color{blue}{x \cdot \left(x \cdot -0.5\right) + \left(-1 - x\right)} \]

    associate-+r- [=>] 99.4

    \[ \color{blue}{\left(x \cdot \left(x \cdot -0.5\right) + -1\right) - x} \]
  6. Final simplification 99.4%

    \[\leadsto \left(x \cdot \left(x \cdot -0.5\right) + -1\right) - x \]

Alternatives

Alternative 1
Accuracy: 99.1%
Cost: 192
\[-1 - x \]
Alternative 2
Accuracy: 98.0%
Cost: 64
\[-1 \]

Error

Reproduce?

herbie shell --seed 2023152 
; Input for `herbie shell`: the original qlog expression, its valid input
; range (-1 < x < 1) as a precondition, and the known-good Taylor-series
; target used for the accuracy comparison in the report.
(FPCore (x)
  :name "qlog (example 3.10)"
  :precision binary64
  :pre (and (< -1.0 x) (< x 1.0))

  :herbie-target
  (- (+ (+ (+ 1.0 x) (/ (* x x) 2.0)) (* 0.4166666666666667 (pow x 3.0))))

  (/ (log (- 1.0 x)) (log (+ 1.0 x))))