Rust f32::acosh

Percentage Accurate: 53.0% → 98.9%
Time: 9.2s
Alternatives: 8
Speedup: 2.0×

Specification

?
\[x \geq 1\]
\[\begin{array}{l} \\ \cosh^{-1} x \end{array} \]
(FPCore (x) :precision binary32 (acosh x))
float code(float x) {
	// Reference version: single-precision inverse hyperbolic cosine
	// straight from the C math library.
	return acoshf(x);
}
function code(x)
	return acosh(x)
end
function tmp = code(x)
	tmp = acosh(x);
end
\begin{array}{l}

\\
\cosh^{-1} x
\end{array}

Sampling outcomes in binary32 precision:

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. Horizontal axis shows value of an input variable; the variable is chosen in the title. Vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average while dots represent individual samples.

Accuracy vs Speed?

Herbie found 8 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 53.0% accurate, 1.0× speedup?

\[\begin{array}{l} \\ \log \left(x + \sqrt{x \cdot x - 1}\right) \end{array} \]
(FPCore (x) :precision binary32 (log (+ x (sqrt (- (* x x) 1.0)))))
float code(float x) {
	// Initial program: the textbook identity acosh(x) = log(x + sqrt(x^2 - 1)).
	// NOTE(review): x * x overflows to +inf for large x, and (x*x - 1.0f)
	// cancels catastrophically near x = 1 — plausible causes of the
	// reported 53.0% accuracy.
	return logf((x + sqrtf(((x * x) - 1.0f))));
}
real(4) function code(x)
    real(4), intent (in) :: x
    code = log((x + sqrt(((x * x) - 1.0e0))))
end function
function code(x)
	return log(Float32(x + sqrt(Float32(Float32(x * x) - Float32(1.0)))))
end
function tmp = code(x)
	tmp = log((x + sqrt(((x * x) - single(1.0)))));
end
\begin{array}{l}

\\
\log \left(x + \sqrt{x \cdot x - 1}\right)
\end{array}

Alternative 1: 98.9% accurate, 1.6× speedup?

\[\begin{array}{l} \\ \log \left(x + x \cdot \left(\left(1 - \frac{0.125 + \frac{0.0625}{x \cdot x}}{x \cdot \left(x \cdot \left(x \cdot x\right)\right)}\right) + \frac{-0.5}{x \cdot x}\right)\right) \end{array} \]
(FPCore (x)
 :precision binary32
 (log
  (+
   x
   (*
    x
    (+
     (- 1.0 (/ (+ 0.125 (/ 0.0625 (* x x))) (* x (* x (* x x)))))
     (/ -0.5 (* x x)))))))
float code(float x) {
	// Alternative 1 (98.9% accurate, 1.6x speedup): Taylor expansion of
	// acosh(x) around x = +inf, evaluated as
	// log(x + x * ((1 - (0.125 + 0.0625/x^2) / x^4) + (-0.5)/x^2)).
	// The sqrt is gone; x*x appears only in denominators, where overflow
	// to +inf just drives the correction terms to 0.
	return logf((x + (x * ((1.0f - ((0.125f + (0.0625f / (x * x))) / (x * (x * (x * x))))) + (-0.5f / (x * x))))));
}
real(4) function code(x)
    real(4), intent (in) :: x
    code = log((x + (x * ((1.0e0 - ((0.125e0 + (0.0625e0 / (x * x))) / (x * (x * (x * x))))) + ((-0.5e0) / (x * x))))))
end function
function code(x)
	return log(Float32(x + Float32(x * Float32(Float32(Float32(1.0) - Float32(Float32(Float32(0.125) + Float32(Float32(0.0625) / Float32(x * x))) / Float32(x * Float32(x * Float32(x * x))))) + Float32(Float32(-0.5) / Float32(x * x))))))
end
function tmp = code(x)
	tmp = log((x + (x * ((single(1.0) - ((single(0.125) + (single(0.0625) / (x * x))) / (x * (x * (x * x))))) + (single(-0.5) / (x * x))))));
end
\begin{array}{l}

\\
\log \left(x + x \cdot \left(\left(1 - \frac{0.125 + \frac{0.0625}{x \cdot x}}{x \cdot \left(x \cdot \left(x \cdot x\right)\right)}\right) + \frac{-0.5}{x \cdot x}\right)\right)
\end{array}
Derivation
  1. Initial program 54.3%

    \[\log \left(x + \sqrt{x \cdot x - 1}\right) \]
  2. Add Preprocessing
  3. Taylor expanded in x around inf 99.4%

    \[\leadsto \log \left(x + \color{blue}{x \cdot \left(\left(1 + -1 \cdot \frac{0.125 + 0.0625 \cdot \frac{1}{{x}^{2}}}{{x}^{4}}\right) - 0.5 \cdot \frac{1}{{x}^{2}}\right)}\right) \]
  4. Step-by-step derivation
    1. sub-neg99.4%

      \[\leadsto \log \left(x + x \cdot \color{blue}{\left(\left(1 + -1 \cdot \frac{0.125 + 0.0625 \cdot \frac{1}{{x}^{2}}}{{x}^{4}}\right) + \left(-0.5 \cdot \frac{1}{{x}^{2}}\right)\right)}\right) \]
    2. mul-1-neg99.4%

      \[\leadsto \log \left(x + x \cdot \left(\left(1 + \color{blue}{\left(-\frac{0.125 + 0.0625 \cdot \frac{1}{{x}^{2}}}{{x}^{4}}\right)}\right) + \left(-0.5 \cdot \frac{1}{{x}^{2}}\right)\right)\right) \]
    3. unsub-neg99.4%

      \[\leadsto \log \left(x + x \cdot \left(\color{blue}{\left(1 - \frac{0.125 + 0.0625 \cdot \frac{1}{{x}^{2}}}{{x}^{4}}\right)} + \left(-0.5 \cdot \frac{1}{{x}^{2}}\right)\right)\right) \]
    4. unpow299.4%

      \[\leadsto \log \left(x + x \cdot \left(\left(1 - \frac{0.125 + 0.0625 \cdot \frac{1}{\color{blue}{x \cdot x}}}{{x}^{4}}\right) + \left(-0.5 \cdot \frac{1}{{x}^{2}}\right)\right)\right) \]
    5. associate-*r/99.4%

      \[\leadsto \log \left(x + x \cdot \left(\left(1 - \frac{0.125 + \color{blue}{\frac{0.0625 \cdot 1}{x \cdot x}}}{{x}^{4}}\right) + \left(-0.5 \cdot \frac{1}{{x}^{2}}\right)\right)\right) \]
    6. metadata-eval99.4%

      \[\leadsto \log \left(x + x \cdot \left(\left(1 - \frac{0.125 + \frac{\color{blue}{0.0625}}{x \cdot x}}{{x}^{4}}\right) + \left(-0.5 \cdot \frac{1}{{x}^{2}}\right)\right)\right) \]
    7. metadata-eval99.4%

      \[\leadsto \log \left(x + x \cdot \left(\left(1 - \frac{0.125 + \frac{0.0625}{x \cdot x}}{{x}^{\color{blue}{\left(3 + 1\right)}}}\right) + \left(-0.5 \cdot \frac{1}{{x}^{2}}\right)\right)\right) \]
    8. pow-plus99.4%

      \[\leadsto \log \left(x + x \cdot \left(\left(1 - \frac{0.125 + \frac{0.0625}{x \cdot x}}{\color{blue}{{x}^{3} \cdot x}}\right) + \left(-0.5 \cdot \frac{1}{{x}^{2}}\right)\right)\right) \]
    9. cube-unmult99.4%

      \[\leadsto \log \left(x + x \cdot \left(\left(1 - \frac{0.125 + \frac{0.0625}{x \cdot x}}{\color{blue}{\left(x \cdot \left(x \cdot x\right)\right)} \cdot x}\right) + \left(-0.5 \cdot \frac{1}{{x}^{2}}\right)\right)\right) \]
    10. *-commutative99.4%

      \[\leadsto \log \left(x + x \cdot \left(\left(1 - \frac{0.125 + \frac{0.0625}{x \cdot x}}{\color{blue}{x \cdot \left(x \cdot \left(x \cdot x\right)\right)}}\right) + \left(-0.5 \cdot \frac{1}{{x}^{2}}\right)\right)\right) \]
    11. unpow299.4%

      \[\leadsto \log \left(x + x \cdot \left(\left(1 - \frac{0.125 + \frac{0.0625}{x \cdot x}}{x \cdot \left(x \cdot \left(x \cdot x\right)\right)}\right) + \left(-0.5 \cdot \frac{1}{\color{blue}{x \cdot x}}\right)\right)\right) \]
    12. associate-*r/99.4%

      \[\leadsto \log \left(x + x \cdot \left(\left(1 - \frac{0.125 + \frac{0.0625}{x \cdot x}}{x \cdot \left(x \cdot \left(x \cdot x\right)\right)}\right) + \left(-\color{blue}{\frac{0.5 \cdot 1}{x \cdot x}}\right)\right)\right) \]
    13. metadata-eval99.4%

      \[\leadsto \log \left(x + x \cdot \left(\left(1 - \frac{0.125 + \frac{0.0625}{x \cdot x}}{x \cdot \left(x \cdot \left(x \cdot x\right)\right)}\right) + \left(-\frac{\color{blue}{0.5}}{x \cdot x}\right)\right)\right) \]
  5. Simplified99.4%

    \[\leadsto \log \left(x + \color{blue}{x \cdot \left(\left(1 - \frac{0.125 + \frac{0.0625}{x \cdot x}}{x \cdot \left(x \cdot \left(x \cdot x\right)\right)}\right) + \frac{-0.5}{x \cdot x}\right)}\right) \]
  6. Add Preprocessing

Alternative 2: 98.9% accurate, 1.7× speedup?

\[\begin{array}{l} \\ \log \left(x \cdot \left(\left(2 - \frac{0.125 + \frac{0.0625}{x \cdot x}}{x \cdot \left(x \cdot \left(x \cdot x\right)\right)}\right) + \frac{\frac{-0.5}{x}}{x}\right)\right) \end{array} \]
(FPCore (x)
 :precision binary32
 (log
  (*
   x
   (+
    (- 2.0 (/ (+ 0.125 (/ 0.0625 (* x x))) (* x (* x (* x x)))))
    (/ (/ -0.5 x) x)))))
float code(float x) {
	// Alternative 2 (98.9% accurate, 1.7x speedup): same asymptotic series
	// as Alternative 1, but with the leading x folded into the bracket
	// (constant 2 instead of 1) and -0.5/x^2 computed as two divisions:
	// log(x * ((2 - (0.125 + 0.0625/x^2) / x^4) + (-0.5/x)/x)).
	return logf((x * ((2.0f - ((0.125f + (0.0625f / (x * x))) / (x * (x * (x * x))))) + ((-0.5f / x) / x))));
}
real(4) function code(x)
    real(4), intent (in) :: x
    code = log((x * ((2.0e0 - ((0.125e0 + (0.0625e0 / (x * x))) / (x * (x * (x * x))))) + (((-0.5e0) / x) / x))))
end function
function code(x)
	return log(Float32(x * Float32(Float32(Float32(2.0) - Float32(Float32(Float32(0.125) + Float32(Float32(0.0625) / Float32(x * x))) / Float32(x * Float32(x * Float32(x * x))))) + Float32(Float32(Float32(-0.5) / x) / x))))
end
function tmp = code(x)
	tmp = log((x * ((single(2.0) - ((single(0.125) + (single(0.0625) / (x * x))) / (x * (x * (x * x))))) + ((single(-0.5) / x) / x))));
end
\begin{array}{l}

\\
\log \left(x \cdot \left(\left(2 - \frac{0.125 + \frac{0.0625}{x \cdot x}}{x \cdot \left(x \cdot \left(x \cdot x\right)\right)}\right) + \frac{\frac{-0.5}{x}}{x}\right)\right)
\end{array}
Derivation
  1. Initial program 54.3%

    \[\log \left(x + \sqrt{x \cdot x - 1}\right) \]
  2. Add Preprocessing
  3. Taylor expanded in x around inf 99.4%

    \[\leadsto \log \color{blue}{\left(x \cdot \left(\left(2 + -1 \cdot \frac{0.125 + 0.0625 \cdot \frac{1}{{x}^{2}}}{{x}^{4}}\right) - 0.5 \cdot \frac{1}{{x}^{2}}\right)\right)} \]
  4. Step-by-step derivation
    1. sub-neg99.4%

      \[\leadsto \log \left(x \cdot \color{blue}{\left(\left(2 + -1 \cdot \frac{0.125 + 0.0625 \cdot \frac{1}{{x}^{2}}}{{x}^{4}}\right) + \left(-0.5 \cdot \frac{1}{{x}^{2}}\right)\right)}\right) \]
    2. mul-1-neg99.4%

      \[\leadsto \log \left(x \cdot \left(\left(2 + \color{blue}{\left(-\frac{0.125 + 0.0625 \cdot \frac{1}{{x}^{2}}}{{x}^{4}}\right)}\right) + \left(-0.5 \cdot \frac{1}{{x}^{2}}\right)\right)\right) \]
    3. unsub-neg99.4%

      \[\leadsto \log \left(x \cdot \left(\color{blue}{\left(2 - \frac{0.125 + 0.0625 \cdot \frac{1}{{x}^{2}}}{{x}^{4}}\right)} + \left(-0.5 \cdot \frac{1}{{x}^{2}}\right)\right)\right) \]
    4. unpow299.4%

      \[\leadsto \log \left(x \cdot \left(\left(2 - \frac{0.125 + 0.0625 \cdot \frac{1}{\color{blue}{x \cdot x}}}{{x}^{4}}\right) + \left(-0.5 \cdot \frac{1}{{x}^{2}}\right)\right)\right) \]
    5. associate-*r/99.4%

      \[\leadsto \log \left(x \cdot \left(\left(2 - \frac{0.125 + \color{blue}{\frac{0.0625 \cdot 1}{x \cdot x}}}{{x}^{4}}\right) + \left(-0.5 \cdot \frac{1}{{x}^{2}}\right)\right)\right) \]
    6. metadata-eval99.4%

      \[\leadsto \log \left(x \cdot \left(\left(2 - \frac{0.125 + \frac{\color{blue}{0.0625}}{x \cdot x}}{{x}^{4}}\right) + \left(-0.5 \cdot \frac{1}{{x}^{2}}\right)\right)\right) \]
    7. metadata-eval99.4%

      \[\leadsto \log \left(x \cdot \left(\left(2 - \frac{0.125 + \frac{0.0625}{x \cdot x}}{{x}^{\color{blue}{\left(3 + 1\right)}}}\right) + \left(-0.5 \cdot \frac{1}{{x}^{2}}\right)\right)\right) \]
    8. pow-plus99.4%

      \[\leadsto \log \left(x \cdot \left(\left(2 - \frac{0.125 + \frac{0.0625}{x \cdot x}}{\color{blue}{{x}^{3} \cdot x}}\right) + \left(-0.5 \cdot \frac{1}{{x}^{2}}\right)\right)\right) \]
    9. cube-unmult99.4%

      \[\leadsto \log \left(x \cdot \left(\left(2 - \frac{0.125 + \frac{0.0625}{x \cdot x}}{\color{blue}{\left(x \cdot \left(x \cdot x\right)\right)} \cdot x}\right) + \left(-0.5 \cdot \frac{1}{{x}^{2}}\right)\right)\right) \]
    10. *-commutative99.4%

      \[\leadsto \log \left(x \cdot \left(\left(2 - \frac{0.125 + \frac{0.0625}{x \cdot x}}{\color{blue}{x \cdot \left(x \cdot \left(x \cdot x\right)\right)}}\right) + \left(-0.5 \cdot \frac{1}{{x}^{2}}\right)\right)\right) \]
    11. unpow299.4%

      \[\leadsto \log \left(x \cdot \left(\left(2 - \frac{0.125 + \frac{0.0625}{x \cdot x}}{x \cdot \left(x \cdot \left(x \cdot x\right)\right)}\right) + \left(-0.5 \cdot \frac{1}{\color{blue}{x \cdot x}}\right)\right)\right) \]
    12. associate-*r/99.4%

      \[\leadsto \log \left(x \cdot \left(\left(2 - \frac{0.125 + \frac{0.0625}{x \cdot x}}{x \cdot \left(x \cdot \left(x \cdot x\right)\right)}\right) + \left(-\color{blue}{\frac{0.5 \cdot 1}{x \cdot x}}\right)\right)\right) \]
    13. metadata-eval99.4%

      \[\leadsto \log \left(x \cdot \left(\left(2 - \frac{0.125 + \frac{0.0625}{x \cdot x}}{x \cdot \left(x \cdot \left(x \cdot x\right)\right)}\right) + \left(-\frac{\color{blue}{0.5}}{x \cdot x}\right)\right)\right) \]
  5. Simplified99.4%

    \[\leadsto \log \color{blue}{\left(x \cdot \left(\left(2 - \frac{0.125 + \frac{0.0625}{x \cdot x}}{x \cdot \left(x \cdot \left(x \cdot x\right)\right)}\right) + \frac{-0.5}{x \cdot x}\right)\right)} \]
  6. Step-by-step derivation
    1. associate-/r*99.4%

      \[\leadsto \log \left(x \cdot \left(\left(2 - \frac{0.125 + \frac{0.0625}{x \cdot x}}{x \cdot \left(x \cdot \left(x \cdot x\right)\right)}\right) + \color{blue}{\frac{\frac{-0.5}{x}}{x}}\right)\right) \]
  7. Applied egg-rr99.4%

    \[\leadsto \log \left(x \cdot \left(\left(2 - \frac{0.125 + \frac{0.0625}{x \cdot x}}{x \cdot \left(x \cdot \left(x \cdot x\right)\right)}\right) + \color{blue}{\frac{\frac{-0.5}{x}}{x}}\right)\right) \]
  8. Add Preprocessing

Alternative 3: 98.9% accurate, 1.7× speedup?

\[\begin{array}{l} \\ \log \left(x \cdot \left(\frac{-0.5}{x \cdot x} + \left(2 - \frac{0.125 + \frac{0.0625}{x \cdot x}}{x \cdot \left(x \cdot \left(x \cdot x\right)\right)}\right)\right)\right) \end{array} \]
(FPCore (x)
 :precision binary32
 (log
  (*
   x
   (+
    (/ -0.5 (* x x))
    (- 2.0 (/ (+ 0.125 (/ 0.0625 (* x x))) (* x (* x (* x x)))))))))
float code(float x) {
	// Alternative 3 (98.9% accurate, 1.7x speedup): identical series to
	// Alternative 2 with the two summands of the bracket reordered:
	// log(x * (-0.5/x^2 + (2 - (0.125 + 0.0625/x^2) / x^4))).
	return logf((x * ((-0.5f / (x * x)) + (2.0f - ((0.125f + (0.0625f / (x * x))) / (x * (x * (x * x))))))));
}
real(4) function code(x)
    real(4), intent (in) :: x
    code = log((x * (((-0.5e0) / (x * x)) + (2.0e0 - ((0.125e0 + (0.0625e0 / (x * x))) / (x * (x * (x * x))))))))
end function
function code(x)
	return log(Float32(x * Float32(Float32(Float32(-0.5) / Float32(x * x)) + Float32(Float32(2.0) - Float32(Float32(Float32(0.125) + Float32(Float32(0.0625) / Float32(x * x))) / Float32(x * Float32(x * Float32(x * x))))))))
end
function tmp = code(x)
	tmp = log((x * ((single(-0.5) / (x * x)) + (single(2.0) - ((single(0.125) + (single(0.0625) / (x * x))) / (x * (x * (x * x))))))));
end
\begin{array}{l}

\\
\log \left(x \cdot \left(\frac{-0.5}{x \cdot x} + \left(2 - \frac{0.125 + \frac{0.0625}{x \cdot x}}{x \cdot \left(x \cdot \left(x \cdot x\right)\right)}\right)\right)\right)
\end{array}
Derivation
  1. Initial program 54.3%

    \[\log \left(x + \sqrt{x \cdot x - 1}\right) \]
  2. Add Preprocessing
  3. Taylor expanded in x around inf 99.4%

    \[\leadsto \log \color{blue}{\left(x \cdot \left(\left(2 + -1 \cdot \frac{0.125 + 0.0625 \cdot \frac{1}{{x}^{2}}}{{x}^{4}}\right) - 0.5 \cdot \frac{1}{{x}^{2}}\right)\right)} \]
  4. Step-by-step derivation
    1. sub-neg99.4%

      \[\leadsto \log \left(x \cdot \color{blue}{\left(\left(2 + -1 \cdot \frac{0.125 + 0.0625 \cdot \frac{1}{{x}^{2}}}{{x}^{4}}\right) + \left(-0.5 \cdot \frac{1}{{x}^{2}}\right)\right)}\right) \]
    2. mul-1-neg99.4%

      \[\leadsto \log \left(x \cdot \left(\left(2 + \color{blue}{\left(-\frac{0.125 + 0.0625 \cdot \frac{1}{{x}^{2}}}{{x}^{4}}\right)}\right) + \left(-0.5 \cdot \frac{1}{{x}^{2}}\right)\right)\right) \]
    3. unsub-neg99.4%

      \[\leadsto \log \left(x \cdot \left(\color{blue}{\left(2 - \frac{0.125 + 0.0625 \cdot \frac{1}{{x}^{2}}}{{x}^{4}}\right)} + \left(-0.5 \cdot \frac{1}{{x}^{2}}\right)\right)\right) \]
    4. unpow299.4%

      \[\leadsto \log \left(x \cdot \left(\left(2 - \frac{0.125 + 0.0625 \cdot \frac{1}{\color{blue}{x \cdot x}}}{{x}^{4}}\right) + \left(-0.5 \cdot \frac{1}{{x}^{2}}\right)\right)\right) \]
    5. associate-*r/99.4%

      \[\leadsto \log \left(x \cdot \left(\left(2 - \frac{0.125 + \color{blue}{\frac{0.0625 \cdot 1}{x \cdot x}}}{{x}^{4}}\right) + \left(-0.5 \cdot \frac{1}{{x}^{2}}\right)\right)\right) \]
    6. metadata-eval99.4%

      \[\leadsto \log \left(x \cdot \left(\left(2 - \frac{0.125 + \frac{\color{blue}{0.0625}}{x \cdot x}}{{x}^{4}}\right) + \left(-0.5 \cdot \frac{1}{{x}^{2}}\right)\right)\right) \]
    7. metadata-eval99.4%

      \[\leadsto \log \left(x \cdot \left(\left(2 - \frac{0.125 + \frac{0.0625}{x \cdot x}}{{x}^{\color{blue}{\left(3 + 1\right)}}}\right) + \left(-0.5 \cdot \frac{1}{{x}^{2}}\right)\right)\right) \]
    8. pow-plus99.4%

      \[\leadsto \log \left(x \cdot \left(\left(2 - \frac{0.125 + \frac{0.0625}{x \cdot x}}{\color{blue}{{x}^{3} \cdot x}}\right) + \left(-0.5 \cdot \frac{1}{{x}^{2}}\right)\right)\right) \]
    9. cube-unmult99.4%

      \[\leadsto \log \left(x \cdot \left(\left(2 - \frac{0.125 + \frac{0.0625}{x \cdot x}}{\color{blue}{\left(x \cdot \left(x \cdot x\right)\right)} \cdot x}\right) + \left(-0.5 \cdot \frac{1}{{x}^{2}}\right)\right)\right) \]
    10. *-commutative99.4%

      \[\leadsto \log \left(x \cdot \left(\left(2 - \frac{0.125 + \frac{0.0625}{x \cdot x}}{\color{blue}{x \cdot \left(x \cdot \left(x \cdot x\right)\right)}}\right) + \left(-0.5 \cdot \frac{1}{{x}^{2}}\right)\right)\right) \]
    11. unpow299.4%

      \[\leadsto \log \left(x \cdot \left(\left(2 - \frac{0.125 + \frac{0.0625}{x \cdot x}}{x \cdot \left(x \cdot \left(x \cdot x\right)\right)}\right) + \left(-0.5 \cdot \frac{1}{\color{blue}{x \cdot x}}\right)\right)\right) \]
    12. associate-*r/99.4%

      \[\leadsto \log \left(x \cdot \left(\left(2 - \frac{0.125 + \frac{0.0625}{x \cdot x}}{x \cdot \left(x \cdot \left(x \cdot x\right)\right)}\right) + \left(-\color{blue}{\frac{0.5 \cdot 1}{x \cdot x}}\right)\right)\right) \]
    13. metadata-eval99.4%

      \[\leadsto \log \left(x \cdot \left(\left(2 - \frac{0.125 + \frac{0.0625}{x \cdot x}}{x \cdot \left(x \cdot \left(x \cdot x\right)\right)}\right) + \left(-\frac{\color{blue}{0.5}}{x \cdot x}\right)\right)\right) \]
  5. Simplified99.4%

    \[\leadsto \log \color{blue}{\left(x \cdot \left(\left(2 - \frac{0.125 + \frac{0.0625}{x \cdot x}}{x \cdot \left(x \cdot \left(x \cdot x\right)\right)}\right) + \frac{-0.5}{x \cdot x}\right)\right)} \]
  6. Final simplification99.4%

    \[\leadsto \log \left(x \cdot \left(\frac{-0.5}{x \cdot x} + \left(2 - \frac{0.125 + \frac{0.0625}{x \cdot x}}{x \cdot \left(x \cdot \left(x \cdot x\right)\right)}\right)\right)\right) \]
  7. Add Preprocessing

Alternative 4: 98.7% accurate, 1.7× speedup?

\[\begin{array}{l} \\ \log \left(x + \left(\left(1 + x \cdot \left(1 - \frac{0.5 + \frac{0.125}{x \cdot x}}{x \cdot x}\right)\right) + -1\right)\right) \end{array} \]
(FPCore (x)
 :precision binary32
 (log
  (+ x (+ (+ 1.0 (* x (- 1.0 (/ (+ 0.5 (/ 0.125 (* x x))) (* x x))))) -1.0))))
float code(float x) {
	// Alternative 4 (98.7% accurate, 1.7x speedup): shorter series wrapped
	// in an algebraically redundant (1 + ... + -1) pattern kept from the
	// derivation's expm1/log1p rewriting:
	// log(x + ((1 + x * (1 - (0.5 + 0.125/x^2) / x^2)) + -1)).
	return logf((x + ((1.0f + (x * (1.0f - ((0.5f + (0.125f / (x * x))) / (x * x))))) + -1.0f)));
}
real(4) function code(x)
    real(4), intent (in) :: x
    code = log((x + ((1.0e0 + (x * (1.0e0 - ((0.5e0 + (0.125e0 / (x * x))) / (x * x))))) + (-1.0e0))))
end function
function code(x)
	return log(Float32(x + Float32(Float32(Float32(1.0) + Float32(x * Float32(Float32(1.0) - Float32(Float32(Float32(0.5) + Float32(Float32(0.125) / Float32(x * x))) / Float32(x * x))))) + Float32(-1.0))))
end
function tmp = code(x)
	tmp = log((x + ((single(1.0) + (x * (single(1.0) - ((single(0.5) + (single(0.125) / (x * x))) / (x * x))))) + single(-1.0))));
end
\begin{array}{l}

\\
\log \left(x + \left(\left(1 + x \cdot \left(1 - \frac{0.5 + \frac{0.125}{x \cdot x}}{x \cdot x}\right)\right) + -1\right)\right)
\end{array}
Derivation
  1. Initial program 54.3%

    \[\log \left(x + \sqrt{x \cdot x - 1}\right) \]
  2. Add Preprocessing
  3. Taylor expanded in x around inf 99.2%

    \[\leadsto \log \left(x + \color{blue}{x \cdot \left(1 + -1 \cdot \frac{0.5 + 0.125 \cdot \frac{1}{{x}^{2}}}{{x}^{2}}\right)}\right) \]
  4. Step-by-step derivation
    1. mul-1-neg99.2%

      \[\leadsto \log \left(x + x \cdot \left(1 + \color{blue}{\left(-\frac{0.5 + 0.125 \cdot \frac{1}{{x}^{2}}}{{x}^{2}}\right)}\right)\right) \]
    2. unsub-neg99.2%

      \[\leadsto \log \left(x + x \cdot \color{blue}{\left(1 - \frac{0.5 + 0.125 \cdot \frac{1}{{x}^{2}}}{{x}^{2}}\right)}\right) \]
    3. unpow299.2%

      \[\leadsto \log \left(x + x \cdot \left(1 - \frac{0.5 + 0.125 \cdot \frac{1}{\color{blue}{x \cdot x}}}{{x}^{2}}\right)\right) \]
    4. associate-*r/99.2%

      \[\leadsto \log \left(x + x \cdot \left(1 - \frac{0.5 + \color{blue}{\frac{0.125 \cdot 1}{x \cdot x}}}{{x}^{2}}\right)\right) \]
    5. metadata-eval99.2%

      \[\leadsto \log \left(x + x \cdot \left(1 - \frac{0.5 + \frac{\color{blue}{0.125}}{x \cdot x}}{{x}^{2}}\right)\right) \]
    6. unpow299.2%

      \[\leadsto \log \left(x + x \cdot \left(1 - \frac{0.5 + \frac{0.125}{x \cdot x}}{\color{blue}{x \cdot x}}\right)\right) \]
  5. Simplified99.2%

    \[\leadsto \log \left(x + \color{blue}{x \cdot \left(1 - \frac{0.5 + \frac{0.125}{x \cdot x}}{x \cdot x}\right)}\right) \]
  6. Step-by-step derivation
    1. expm1-log1p-u98.7%

      \[\leadsto \log \left(x + \color{blue}{\mathsf{expm1}\left(\mathsf{log1p}\left(x \cdot \left(1 - \frac{0.5 + \frac{0.125}{x \cdot x}}{x \cdot x}\right)\right)\right)}\right) \]
    2. expm1-undefine98.7%

      \[\leadsto \log \left(x + \color{blue}{\left(e^{\mathsf{log1p}\left(x \cdot \left(1 - \frac{0.5 + \frac{0.125}{x \cdot x}}{x \cdot x}\right)\right)} - 1\right)}\right) \]
  7. Applied egg-rr98.7%

    \[\leadsto \log \left(x + \color{blue}{\left(e^{\mathsf{log1p}\left(x \cdot \left(1 - \frac{0.5 + \frac{0.125}{x \cdot x}}{x \cdot x}\right)\right)} - 1\right)}\right) \]
  8. Step-by-step derivation
    1. sub-neg98.7%

      \[\leadsto \log \left(x + \color{blue}{\left(e^{\mathsf{log1p}\left(x \cdot \left(1 - \frac{0.5 + \frac{0.125}{x \cdot x}}{x \cdot x}\right)\right)} + \left(-1\right)\right)}\right) \]
    2. log1p-undefine98.7%

      \[\leadsto \log \left(x + \left(e^{\color{blue}{\log \left(1 + x \cdot \left(1 - \frac{0.5 + \frac{0.125}{x \cdot x}}{x \cdot x}\right)\right)}} + \left(-1\right)\right)\right) \]
    3. rem-exp-log99.2%

      \[\leadsto \log \left(x + \left(\color{blue}{\left(1 + x \cdot \left(1 - \frac{0.5 + \frac{0.125}{x \cdot x}}{x \cdot x}\right)\right)} + \left(-1\right)\right)\right) \]
    4. metadata-eval99.2%

      \[\leadsto \log \left(x + \left(\left(1 + x \cdot \left(1 - \frac{0.5 + \frac{0.125}{x \cdot x}}{x \cdot x}\right)\right) + \color{blue}{-1}\right)\right) \]
  9. Simplified99.2%

    \[\leadsto \log \left(x + \color{blue}{\left(\left(1 + x \cdot \left(1 - \frac{0.5 + \frac{0.125}{x \cdot x}}{x \cdot x}\right)\right) + -1\right)}\right) \]
  10. Add Preprocessing

Alternative 5: 98.7% accurate, 1.8× speedup?

\[\begin{array}{l} \\ \log \left(x + x \cdot \left(1 - \frac{0.5 + \frac{0.125}{x \cdot x}}{x \cdot x}\right)\right) \end{array} \]
(FPCore (x)
 :precision binary32
 (log (+ x (* x (- 1.0 (/ (+ 0.5 (/ 0.125 (* x x))) (* x x)))))))
float code(float x) {
	// Alternative 5 (98.7% accurate, 1.8x speedup): truncated asymptotic
	// series, log(x + x * (1 - (0.5 + 0.125/x^2) / x^2)) — one fewer
	// correction term than Alternative 1.
	return logf((x + (x * (1.0f - ((0.5f + (0.125f / (x * x))) / (x * x))))));
}
real(4) function code(x)
    real(4), intent (in) :: x
    code = log((x + (x * (1.0e0 - ((0.5e0 + (0.125e0 / (x * x))) / (x * x))))))
end function
function code(x)
	return log(Float32(x + Float32(x * Float32(Float32(1.0) - Float32(Float32(Float32(0.5) + Float32(Float32(0.125) / Float32(x * x))) / Float32(x * x))))))
end
function tmp = code(x)
	tmp = log((x + (x * (single(1.0) - ((single(0.5) + (single(0.125) / (x * x))) / (x * x))))));
end
\begin{array}{l}

\\
\log \left(x + x \cdot \left(1 - \frac{0.5 + \frac{0.125}{x \cdot x}}{x \cdot x}\right)\right)
\end{array}
Derivation
  1. Initial program 54.3%

    \[\log \left(x + \sqrt{x \cdot x - 1}\right) \]
  2. Add Preprocessing
  3. Taylor expanded in x around inf 99.2%

    \[\leadsto \log \left(x + \color{blue}{x \cdot \left(1 + -1 \cdot \frac{0.5 + 0.125 \cdot \frac{1}{{x}^{2}}}{{x}^{2}}\right)}\right) \]
  4. Step-by-step derivation
    1. mul-1-neg99.2%

      \[\leadsto \log \left(x + x \cdot \left(1 + \color{blue}{\left(-\frac{0.5 + 0.125 \cdot \frac{1}{{x}^{2}}}{{x}^{2}}\right)}\right)\right) \]
    2. unsub-neg99.2%

      \[\leadsto \log \left(x + x \cdot \color{blue}{\left(1 - \frac{0.5 + 0.125 \cdot \frac{1}{{x}^{2}}}{{x}^{2}}\right)}\right) \]
    3. unpow299.2%

      \[\leadsto \log \left(x + x \cdot \left(1 - \frac{0.5 + 0.125 \cdot \frac{1}{\color{blue}{x \cdot x}}}{{x}^{2}}\right)\right) \]
    4. associate-*r/99.2%

      \[\leadsto \log \left(x + x \cdot \left(1 - \frac{0.5 + \color{blue}{\frac{0.125 \cdot 1}{x \cdot x}}}{{x}^{2}}\right)\right) \]
    5. metadata-eval99.2%

      \[\leadsto \log \left(x + x \cdot \left(1 - \frac{0.5 + \frac{\color{blue}{0.125}}{x \cdot x}}{{x}^{2}}\right)\right) \]
    6. unpow299.2%

      \[\leadsto \log \left(x + x \cdot \left(1 - \frac{0.5 + \frac{0.125}{x \cdot x}}{\color{blue}{x \cdot x}}\right)\right) \]
  5. Simplified99.2%

    \[\leadsto \log \left(x + \color{blue}{x \cdot \left(1 - \frac{0.5 + \frac{0.125}{x \cdot x}}{x \cdot x}\right)}\right) \]
  6. Add Preprocessing

Alternative 6: 98.7% accurate, 1.8× speedup?

\[\begin{array}{l} \\ \log \left(x \cdot \left(2 - \frac{0.5 + \frac{0.125}{x \cdot x}}{x \cdot x}\right)\right) \end{array} \]
(FPCore (x)
 :precision binary32
 (log (* x (- 2.0 (/ (+ 0.5 (/ 0.125 (* x x))) (* x x))))))
float code(float x) {
	// Alternative 6 (98.7% accurate, 1.8x speedup): Alternative 5 with the
	// leading x folded into the bracket:
	// log(x * (2 - (0.5 + 0.125/x^2) / x^2)).
	return logf((x * (2.0f - ((0.5f + (0.125f / (x * x))) / (x * x)))));
}
real(4) function code(x)
    real(4), intent (in) :: x
    code = log((x * (2.0e0 - ((0.5e0 + (0.125e0 / (x * x))) / (x * x)))))
end function
function code(x)
	return log(Float32(x * Float32(Float32(2.0) - Float32(Float32(Float32(0.5) + Float32(Float32(0.125) / Float32(x * x))) / Float32(x * x)))))
end
function tmp = code(x)
	tmp = log((x * (single(2.0) - ((single(0.5) + (single(0.125) / (x * x))) / (x * x)))));
end
\begin{array}{l}

\\
\log \left(x \cdot \left(2 - \frac{0.5 + \frac{0.125}{x \cdot x}}{x \cdot x}\right)\right)
\end{array}
Derivation
  1. Initial program 54.3%

    \[\log \left(x + \sqrt{x \cdot x - 1}\right) \]
  2. Add Preprocessing
  3. Taylor expanded in x around inf 99.2%

    \[\leadsto \log \color{blue}{\left(x \cdot \left(2 + -1 \cdot \frac{0.5 + 0.125 \cdot \frac{1}{{x}^{2}}}{{x}^{2}}\right)\right)} \]
  4. Step-by-step derivation
    1. mul-1-neg99.2%

      \[\leadsto \log \left(x \cdot \left(2 + \color{blue}{\left(-\frac{0.5 + 0.125 \cdot \frac{1}{{x}^{2}}}{{x}^{2}}\right)}\right)\right) \]
    2. unsub-neg99.2%

      \[\leadsto \log \left(x \cdot \color{blue}{\left(2 - \frac{0.5 + 0.125 \cdot \frac{1}{{x}^{2}}}{{x}^{2}}\right)}\right) \]
    3. unpow299.2%

      \[\leadsto \log \left(x \cdot \left(2 - \frac{0.5 + 0.125 \cdot \frac{1}{\color{blue}{x \cdot x}}}{{x}^{2}}\right)\right) \]
    4. associate-*r/99.2%

      \[\leadsto \log \left(x \cdot \left(2 - \frac{0.5 + \color{blue}{\frac{0.125 \cdot 1}{x \cdot x}}}{{x}^{2}}\right)\right) \]
    5. metadata-eval99.2%

      \[\leadsto \log \left(x \cdot \left(2 - \frac{0.5 + \frac{\color{blue}{0.125}}{x \cdot x}}{{x}^{2}}\right)\right) \]
    6. unpow299.2%

      \[\leadsto \log \left(x \cdot \left(2 - \frac{0.5 + \frac{0.125}{x \cdot x}}{\color{blue}{x \cdot x}}\right)\right) \]
  5. Simplified99.2%

    \[\leadsto \log \color{blue}{\left(x \cdot \left(2 - \frac{0.5 + \frac{0.125}{x \cdot x}}{x \cdot x}\right)\right)} \]
  6. Add Preprocessing

Alternative 7: 98.3% accurate, 1.9× speedup?

\[\begin{array}{l} \\ \log \left(\frac{-0.5}{x} + x \cdot 2\right) \end{array} \]
(FPCore (x) :precision binary32 (log (+ (/ -0.5 x) (* x 2.0))))
float code(float x) {
	// Alternative 7 (98.3% accurate, 1.9x speedup): two-term approximation
	// log(2x - 0.5/x), from acosh(x) ~ log(2x) plus one correction term.
	return logf(((-0.5f / x) + (x * 2.0f)));
}
real(4) function code(x)
    real(4), intent (in) :: x
    code = log((((-0.5e0) / x) + (x * 2.0e0)))
end function
function code(x)
	return log(Float32(Float32(Float32(-0.5) / x) + Float32(x * Float32(2.0))))
end
function tmp = code(x)
	tmp = log(((single(-0.5) / x) + (x * single(2.0))));
end
\begin{array}{l}

\\
\log \left(\frac{-0.5}{x} + x \cdot 2\right)
\end{array}
Derivation
  1. Initial program 54.3%

    \[\log \left(x + \sqrt{x \cdot x - 1}\right) \]
  2. Add Preprocessing
  3. Taylor expanded in x around inf 98.7%

    \[\leadsto \log \left(x + \color{blue}{x \cdot \left(1 - 0.5 \cdot \frac{1}{{x}^{2}}\right)}\right) \]
  4. Step-by-step derivation
    1. sub-neg98.7%

      \[\leadsto \log \left(x + x \cdot \color{blue}{\left(1 + \left(-0.5 \cdot \frac{1}{{x}^{2}}\right)\right)}\right) \]
    2. unpow298.7%

      \[\leadsto \log \left(x + x \cdot \left(1 + \left(-0.5 \cdot \frac{1}{\color{blue}{x \cdot x}}\right)\right)\right) \]
    3. associate-*r/98.7%

      \[\leadsto \log \left(x + x \cdot \left(1 + \left(-\color{blue}{\frac{0.5 \cdot 1}{x \cdot x}}\right)\right)\right) \]
    4. metadata-eval98.7%

      \[\leadsto \log \left(x + x \cdot \left(1 + \left(-\frac{\color{blue}{0.5}}{x \cdot x}\right)\right)\right) \]
    5. distribute-neg-frac98.7%

      \[\leadsto \log \left(x + x \cdot \left(1 + \color{blue}{\frac{-0.5}{x \cdot x}}\right)\right) \]
    6. metadata-eval98.7%

      \[\leadsto \log \left(x + x \cdot \left(1 + \frac{\color{blue}{-0.5}}{x \cdot x}\right)\right) \]
  5. Simplified98.7%

    \[\leadsto \log \left(x + \color{blue}{x \cdot \left(1 + \frac{-0.5}{x \cdot x}\right)}\right) \]
  6. Taylor expanded in x around 0 53.0%

    \[\leadsto \log \left(x + \color{blue}{\frac{{x}^{2} - 0.5}{x}}\right) \]
  7. Step-by-step derivation
    1. unpow253.0%

      \[\leadsto \log \left(x + \frac{\color{blue}{x \cdot x} - 0.5}{x}\right) \]
  8. Simplified53.0%

    \[\leadsto \log \left(x + \color{blue}{\frac{x \cdot x - 0.5}{x}}\right) \]
  9. Step-by-step derivation
    1. div-sub53.0%

      \[\leadsto \log \left(x + \color{blue}{\left(\frac{x \cdot x}{x} - \frac{0.5}{x}\right)}\right) \]
    2. pow253.0%

      \[\leadsto \log \left(x + \left(\frac{\color{blue}{{x}^{2}}}{x} - \frac{0.5}{x}\right)\right) \]
    3. pow153.0%

      \[\leadsto \log \left(x + \left(\frac{{x}^{2}}{\color{blue}{{x}^{1}}} - \frac{0.5}{x}\right)\right) \]
    4. pow-div98.7%

      \[\leadsto \log \left(x + \left(\color{blue}{{x}^{\left(2 - 1\right)}} - \frac{0.5}{x}\right)\right) \]
    5. metadata-eval98.7%

      \[\leadsto \log \left(x + \left({x}^{\color{blue}{1}} - \frac{0.5}{x}\right)\right) \]
    6. pow198.7%

      \[\leadsto \log \left(x + \left(\color{blue}{x} - \frac{0.5}{x}\right)\right) \]
  10. Applied egg-rr98.7%

    \[\leadsto \log \color{blue}{\left(x + \left(x - \frac{0.5}{x}\right)\right)} \]
  11. Step-by-step derivation
    1. associate-+r-98.7%

      \[\leadsto \log \color{blue}{\left(\left(x + x\right) - \frac{0.5}{x}\right)} \]
    2. sub-neg98.7%

      \[\leadsto \log \color{blue}{\left(\left(x + x\right) + \left(-\frac{0.5}{x}\right)\right)} \]
    3. count-298.7%

      \[\leadsto \log \left(\color{blue}{2 \cdot x} + \left(-\frac{0.5}{x}\right)\right) \]
    4. *-commutative98.7%

      \[\leadsto \log \left(\color{blue}{x \cdot 2} + \left(-\frac{0.5}{x}\right)\right) \]
    5. distribute-neg-frac98.7%

      \[\leadsto \log \left(x \cdot 2 + \color{blue}{\frac{-0.5}{x}}\right) \]
    6. metadata-eval98.7%

      \[\leadsto \log \left(x \cdot 2 + \frac{\color{blue}{-0.5}}{x}\right) \]
  12. Simplified98.7%

    \[\leadsto \log \color{blue}{\left(x \cdot 2 + \frac{-0.5}{x}\right)} \]
  13. Final simplification98.7%

    \[\leadsto \log \left(\frac{-0.5}{x} + x \cdot 2\right) \]
  14. Add Preprocessing

Alternative 8: 96.9% accurate, 2.0× speedup?

\[\begin{array}{l} \\ \log \left(x + x\right) \end{array} \]
(FPCore (x) :precision binary32 (log (+ x x)))
float code(float x) {
	// Alternative 8 (96.9% accurate, 2.0x speedup): leading-order
	// approximation acosh(x) ~ log(2x), written as log(x + x).
	return logf((x + x));
}
real(4) function code(x)
    real(4), intent (in) :: x
    code = log((x + x))
end function
function code(x)
	return log(Float32(x + x))
end
function tmp = code(x)
	tmp = log((x + x));
end
\begin{array}{l}

\\
\log \left(x + x\right)
\end{array}
Derivation
  1. Initial program 54.3%

    \[\log \left(x + \sqrt{x \cdot x - 1}\right) \]
  2. Add Preprocessing
  3. Taylor expanded in x around inf 96.5%

    \[\leadsto \log \left(x + \color{blue}{x}\right) \]
  4. Add Preprocessing

Developer target: 99.3% accurate, 0.7× speedup?

\[\begin{array}{l} \\ \log \left(x + \sqrt{x - 1} \cdot \sqrt{x + 1}\right) \end{array} \]
(FPCore (x)
 :precision binary32
 (log (+ x (* (sqrt (- x 1.0)) (sqrt (+ x 1.0))))))
float code(float x) {
	// Developer target (99.3% accurate, 0.7x speedup): factor
	// sqrt(x^2 - 1) as sqrt(x - 1) * sqrt(x + 1) so x*x is never formed,
	// avoiding both the overflow and the cancellation of the initial
	// program at the cost of a second square root.
	return logf((x + (sqrtf((x - 1.0f)) * sqrtf((x + 1.0f)))));
}
real(4) function code(x)
    real(4), intent (in) :: x
    code = log((x + (sqrt((x - 1.0e0)) * sqrt((x + 1.0e0)))))
end function
function code(x)
	return log(Float32(x + Float32(sqrt(Float32(x - Float32(1.0))) * sqrt(Float32(x + Float32(1.0))))))
end
function tmp = code(x)
	tmp = log((x + (sqrt((x - single(1.0))) * sqrt((x + single(1.0))))));
end
\begin{array}{l}

\\
\log \left(x + \sqrt{x - 1} \cdot \sqrt{x + 1}\right)
\end{array}

Reproduce

?
herbie shell --seed 2024107 
(FPCore (x)
  :name "Rust f32::acosh"
  :precision binary32
  :pre (>= x 1.0)

  :alt
  (log (+ x (* (sqrt (- x 1.0)) (sqrt (+ x 1.0)))))

  (log (+ x (sqrt (- (* x x) 1.0)))))