
(FPCore (x) :precision binary64 (log (+ (/ 1.0 x) (/ (sqrt (- 1.0 (* x x))) x))))
double code(double x) {
return log(((1.0 / x) + (sqrt((1.0 - (x * x))) / x)));
}
! Computes log(1/x + sqrt(1 - x*x)/x) in double precision
! (initial, unrewritten Herbie program; valid for 0 < x <= 1).
real(8) function code(x)
real(8), intent (in) :: x
code = log(((1.0d0 / x) + (sqrt((1.0d0 - (x * x))) / x)))
end function
/** Initial program: log(1/x + sqrt(1 - x*x)/x), evaluated term by term. */
public static double code(double x) {
    double reciprocal = 1.0 / x;
    double root = Math.sqrt(1.0 - (x * x));
    return Math.log(reciprocal + (root / x));
}
def code(x):
    """Initial program: log(1/x + sqrt(1 - x*x)/x) in binary64."""
    recip = 1.0 / x
    root_term = math.sqrt(1.0 - (x * x)) / x
    return math.log(recip + root_term)
# log(1/x + sqrt(1 - x*x)/x), with every intermediate explicitly rounded to Float64.
function code(x) return log(Float64(Float64(1.0 / x) + Float64(sqrt(Float64(1.0 - Float64(x * x))) / x))) end
% log(1/x + sqrt(1 - x*x)/x) in double precision (initial Herbie program).
function tmp = code(x) tmp = log(((1.0 / x) + (sqrt((1.0 - (x * x))) / x))); end
(* log(1/x + sqrt(1 - x*x)/x), rounding each intermediate to $MachinePrecision. *)
code[x_] := N[Log[N[(N[(1.0 / x), $MachinePrecision] + N[(N[Sqrt[N[(1.0 - N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(\frac{1}{x} + \frac{\sqrt{1 - x \cdot x}}{x}\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 11 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (log (+ (/ 1.0 x) (/ (sqrt (- 1.0 (* x x))) x))))
double code(double x) {
return log(((1.0 / x) + (sqrt((1.0 - (x * x))) / x)));
}
real(8) function code(x)
real(8), intent (in) :: x
code = log(((1.0d0 / x) + (sqrt((1.0d0 - (x * x))) / x)))
end function
public static double code(double x) {
return Math.log(((1.0 / x) + (Math.sqrt((1.0 - (x * x))) / x)));
}
def code(x): return math.log(((1.0 / x) + (math.sqrt((1.0 - (x * x))) / x)))
function code(x) return log(Float64(Float64(1.0 / x) + Float64(sqrt(Float64(1.0 - Float64(x * x))) / x))) end
function tmp = code(x) tmp = log(((1.0 / x) + (sqrt((1.0 - (x * x))) / x))); end
code[x_] := N[Log[N[(N[(1.0 / x), $MachinePrecision] + N[(N[Sqrt[N[(1.0 - N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(\frac{1}{x} + \frac{\sqrt{1 - x \cdot x}}{x}\right)
\end{array}
(FPCore (x) :precision binary64 (log (/ (+ (sqrt (- 1.0 (* x x))) 1.0) x)))
double code(double x) {
return log(((sqrt((1.0 - (x * x))) + 1.0) / x));
}
! Equivalent rewrite of log(1/x + sqrt(1 - x*x)/x) using a single
! division: log((sqrt(1 - x*x) + 1) / x).
real(8) function code(x)
real(8), intent (in) :: x
code = log(((sqrt((1.0d0 - (x * x))) + 1.0d0) / x))
end function
/** Equivalent rewrite: log((sqrt(1 - x*x) + 1) / x) with one division. */
public static double code(double x) {
    double root = Math.sqrt(1.0 - (x * x));
    return Math.log((root + 1.0) / x);
}
def code(x):
    """Equivalent rewrite: log((sqrt(1 - x*x) + 1) / x), one division."""
    root = math.sqrt(1.0 - (x * x))
    return math.log((root + 1.0) / x)
function code(x) return log(Float64(Float64(sqrt(Float64(1.0 - Float64(x * x))) + 1.0) / x)) end
function tmp = code(x) tmp = log(((sqrt((1.0 - (x * x))) + 1.0) / x)); end
code[x_] := N[Log[N[(N[(N[Sqrt[N[(1.0 - N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] + 1.0), $MachinePrecision] / x), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(\frac{\sqrt{1 - x \cdot x} + 1}{x}\right)
\end{array}
Initial program 100.0%
log-lowering-log.f64 N/A
div-inv N/A
distribute-rgt1-in N/A
un-div-inv N/A
/-lowering-/.f64 N/A
+-commutative N/A
+-lowering-+.f64 N/A
pow1/2 N/A
rem-square-sqrt N/A
pow-lowering-pow.f64 N/A
rem-square-sqrt N/A
--lowering--.f64 N/A
*-lowering-*.f64 100.0%
Applied egg-rr100.0%
unpow1/2N/A
sqrt-lowering-sqrt.f64N/A
--lowering--.f64N/A
*-lowering-*.f64100.0%
Applied egg-rr100.0%
Final simplification100.0%
(FPCore (x)
:precision binary64
(let* ((t_0 (* x (* x (+ -0.5 (* (* x x) (+ -0.125 (* (* x x) -0.0625))))))))
(log
(+
(/ (+ 2.0 (* x (* x (+ -0.5 (* (* (* x x) (* x x)) -0.03125))))) x)
(/ (/ (* t_0 t_0) (- t_0 2.0)) x)))))
double code(double x) {
double t_0 = x * (x * (-0.5 + ((x * x) * (-0.125 + ((x * x) * -0.0625)))));
return log((((2.0 + (x * (x * (-0.5 + (((x * x) * (x * x)) * -0.03125))))) / x) + (((t_0 * t_0) / (t_0 - 2.0)) / x)));
}
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
t_0 = x * (x * ((-0.5d0) + ((x * x) * ((-0.125d0) + ((x * x) * (-0.0625d0))))))
code = log((((2.0d0 + (x * (x * ((-0.5d0) + (((x * x) * (x * x)) * (-0.03125d0)))))) / x) + (((t_0 * t_0) / (t_0 - 2.0d0)) / x)))
end function
public static double code(double x) {
double t_0 = x * (x * (-0.5 + ((x * x) * (-0.125 + ((x * x) * -0.0625)))));
return Math.log((((2.0 + (x * (x * (-0.5 + (((x * x) * (x * x)) * -0.03125))))) / x) + (((t_0 * t_0) / (t_0 - 2.0)) / x)));
}
def code(x):
    """Polynomial approximation of log(1/x + sqrt(1 - x*x)/x) near x = 0.

    Fix: the generated one-liner fused the assignment and the return
    statement on a single line with no separator, which is a Python
    syntax error; the statements are now on separate lines.
    """
    # Shared Taylor-series factor t_0 = -x^2/2 - x^4/8 - x^6/16 (Horner form).
    t_0 = x * (x * (-0.5 + ((x * x) * (-0.125 + ((x * x) * -0.0625)))))
    return math.log((((2.0 + (x * (x * (-0.5 + (((x * x) * (x * x)) * -0.03125))))) / x) + (((t_0 * t_0) / (t_0 - 2.0)) / x)))
# Polynomial approximation of log(1/x + sqrt(1 - x*x)/x) near x = 0.
# Fix: the generated one-liner juxtaposed two statements with no
# separator, which does not parse in Julia; split onto separate lines.
function code(x)
	t_0 = Float64(x * Float64(x * Float64(-0.5 + Float64(Float64(x * x) * Float64(-0.125 + Float64(Float64(x * x) * -0.0625))))))
	return log(Float64(Float64(Float64(2.0 + Float64(x * Float64(x * Float64(-0.5 + Float64(Float64(Float64(x * x) * Float64(x * x)) * -0.03125))))) / x) + Float64(Float64(Float64(t_0 * t_0) / Float64(t_0 - 2.0)) / x)))
end
function tmp = code(x) t_0 = x * (x * (-0.5 + ((x * x) * (-0.125 + ((x * x) * -0.0625))))); tmp = log((((2.0 + (x * (x * (-0.5 + (((x * x) * (x * x)) * -0.03125))))) / x) + (((t_0 * t_0) / (t_0 - 2.0)) / x))); end
code[x_] := Block[{t$95$0 = N[(x * N[(x * N[(-0.5 + N[(N[(x * x), $MachinePrecision] * N[(-0.125 + N[(N[(x * x), $MachinePrecision] * -0.0625), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[Log[N[(N[(N[(2.0 + N[(x * N[(x * N[(-0.5 + N[(N[(N[(x * x), $MachinePrecision] * N[(x * x), $MachinePrecision]), $MachinePrecision] * -0.03125), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision] + N[(N[(N[(t$95$0 * t$95$0), $MachinePrecision] / N[(t$95$0 - 2.0), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x \cdot \left(x \cdot \left(-0.5 + \left(x \cdot x\right) \cdot \left(-0.125 + \left(x \cdot x\right) \cdot -0.0625\right)\right)\right)\\
\log \left(\frac{2 + x \cdot \left(x \cdot \left(-0.5 + \left(\left(x \cdot x\right) \cdot \left(x \cdot x\right)\right) \cdot -0.03125\right)\right)}{x} + \frac{\frac{t\_0 \cdot t\_0}{t\_0 - 2}}{x}\right)
\end{array}
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
/-lowering-/.f64N/A
Simplified99.7%
flip-+N/A
div-subN/A
div-subN/A
--lowering--.f64N/A
Applied egg-rr99.7%
Taylor expanded in x around 0
/-lowering-/.f64N/A
Simplified99.7%
Final simplification99.7%
(FPCore (x)
:precision binary64
(let* ((t_0 (* x (* x (+ -0.5 (* (* x x) (+ -0.125 (* (* x x) -0.0625))))))))
(log
(+
(/ (/ 4.0 (- 2.0 t_0)) x)
(/ (/ (* t_0 t_0) (- (* x (* x -0.5)) 2.0)) x)))))
double code(double x) {
double t_0 = x * (x * (-0.5 + ((x * x) * (-0.125 + ((x * x) * -0.0625)))));
return log((((4.0 / (2.0 - t_0)) / x) + (((t_0 * t_0) / ((x * (x * -0.5)) - 2.0)) / x)));
}
real(8) function code(x)
real(8), intent (in) :: x
real(8) :: t_0
t_0 = x * (x * ((-0.5d0) + ((x * x) * ((-0.125d0) + ((x * x) * (-0.0625d0))))))
code = log((((4.0d0 / (2.0d0 - t_0)) / x) + (((t_0 * t_0) / ((x * (x * (-0.5d0))) - 2.0d0)) / x)))
end function
public static double code(double x) {
double t_0 = x * (x * (-0.5 + ((x * x) * (-0.125 + ((x * x) * -0.0625)))));
return Math.log((((4.0 / (2.0 - t_0)) / x) + (((t_0 * t_0) / ((x * (x * -0.5)) - 2.0)) / x)));
}
def code(x):
    """Alternative series form of log(1/x + sqrt(1 - x*x)/x) near x = 0.

    Fix: the generated one-liner fused the assignment and the return
    statement with no separator (a Python syntax error); statements are
    now on separate lines.
    """
    # Shared Taylor-series factor t_0 = -x^2/2 - x^4/8 - x^6/16 (Horner form).
    t_0 = x * (x * (-0.5 + ((x * x) * (-0.125 + ((x * x) * -0.0625)))))
    return math.log((((4.0 / (2.0 - t_0)) / x) + (((t_0 * t_0) / ((x * (x * -0.5)) - 2.0)) / x)))
# Alternative series form of log(1/x + sqrt(1 - x*x)/x) near x = 0.
# Fix: the generated one-liner juxtaposed two statements with no
# separator, which does not parse in Julia; split onto separate lines.
function code(x)
	t_0 = Float64(x * Float64(x * Float64(-0.5 + Float64(Float64(x * x) * Float64(-0.125 + Float64(Float64(x * x) * -0.0625))))))
	return log(Float64(Float64(Float64(4.0 / Float64(2.0 - t_0)) / x) + Float64(Float64(Float64(t_0 * t_0) / Float64(Float64(x * Float64(x * -0.5)) - 2.0)) / x)))
end
function tmp = code(x) t_0 = x * (x * (-0.5 + ((x * x) * (-0.125 + ((x * x) * -0.0625))))); tmp = log((((4.0 / (2.0 - t_0)) / x) + (((t_0 * t_0) / ((x * (x * -0.5)) - 2.0)) / x))); end
code[x_] := Block[{t$95$0 = N[(x * N[(x * N[(-0.5 + N[(N[(x * x), $MachinePrecision] * N[(-0.125 + N[(N[(x * x), $MachinePrecision] * -0.0625), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, N[Log[N[(N[(N[(4.0 / N[(2.0 - t$95$0), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision] + N[(N[(N[(t$95$0 * t$95$0), $MachinePrecision] / N[(N[(x * N[(x * -0.5), $MachinePrecision]), $MachinePrecision] - 2.0), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := x \cdot \left(x \cdot \left(-0.5 + \left(x \cdot x\right) \cdot \left(-0.125 + \left(x \cdot x\right) \cdot -0.0625\right)\right)\right)\\
\log \left(\frac{\frac{4}{2 - t\_0}}{x} + \frac{\frac{t\_0 \cdot t\_0}{x \cdot \left(x \cdot -0.5\right) - 2}}{x}\right)
\end{array}
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
/-lowering-/.f64N/A
Simplified99.7%
flip-+N/A
div-subN/A
div-subN/A
--lowering--.f64N/A
Applied egg-rr99.7%
Taylor expanded in x around 0
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-commutativeN/A
*-lowering-*.f64N/A
*-commutativeN/A
*-lowering-*.f6499.7%
Simplified99.7%
Final simplification99.7%
(FPCore (x)
:precision binary64
(log
(-
(/
(/
4.0
(- 2.0 (* x (* x (+ -0.5 (* (* x x) (+ -0.125 (* (* x x) -0.0625))))))))
x)
(* (* x (* x x)) (+ (* x (* x (+ 0.03125 (* (* x x) 0.0234375)))) 0.125)))))
double code(double x) {
return log((((4.0 / (2.0 - (x * (x * (-0.5 + ((x * x) * (-0.125 + ((x * x) * -0.0625)))))))) / x) - ((x * (x * x)) * ((x * (x * (0.03125 + ((x * x) * 0.0234375)))) + 0.125))));
}
real(8) function code(x)
real(8), intent (in) :: x
code = log((((4.0d0 / (2.0d0 - (x * (x * ((-0.5d0) + ((x * x) * ((-0.125d0) + ((x * x) * (-0.0625d0))))))))) / x) - ((x * (x * x)) * ((x * (x * (0.03125d0 + ((x * x) * 0.0234375d0)))) + 0.125d0))))
end function
public static double code(double x) {
return Math.log((((4.0 / (2.0 - (x * (x * (-0.5 + ((x * x) * (-0.125 + ((x * x) * -0.0625)))))))) / x) - ((x * (x * x)) * ((x * (x * (0.03125 + ((x * x) * 0.0234375)))) + 0.125))));
}
def code(x): return math.log((((4.0 / (2.0 - (x * (x * (-0.5 + ((x * x) * (-0.125 + ((x * x) * -0.0625)))))))) / x) - ((x * (x * x)) * ((x * (x * (0.03125 + ((x * x) * 0.0234375)))) + 0.125))))
function code(x) return log(Float64(Float64(Float64(4.0 / Float64(2.0 - Float64(x * Float64(x * Float64(-0.5 + Float64(Float64(x * x) * Float64(-0.125 + Float64(Float64(x * x) * -0.0625)))))))) / x) - Float64(Float64(x * Float64(x * x)) * Float64(Float64(x * Float64(x * Float64(0.03125 + Float64(Float64(x * x) * 0.0234375)))) + 0.125)))) end
function tmp = code(x) tmp = log((((4.0 / (2.0 - (x * (x * (-0.5 + ((x * x) * (-0.125 + ((x * x) * -0.0625)))))))) / x) - ((x * (x * x)) * ((x * (x * (0.03125 + ((x * x) * 0.0234375)))) + 0.125)))); end
code[x_] := N[Log[N[(N[(N[(4.0 / N[(2.0 - N[(x * N[(x * N[(-0.5 + N[(N[(x * x), $MachinePrecision] * N[(-0.125 + N[(N[(x * x), $MachinePrecision] * -0.0625), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision] - N[(N[(x * N[(x * x), $MachinePrecision]), $MachinePrecision] * N[(N[(x * N[(x * N[(0.03125 + N[(N[(x * x), $MachinePrecision] * 0.0234375), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] + 0.125), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(\frac{\frac{4}{2 - x \cdot \left(x \cdot \left(-0.5 + \left(x \cdot x\right) \cdot \left(-0.125 + \left(x \cdot x\right) \cdot -0.0625\right)\right)\right)}}{x} - \left(x \cdot \left(x \cdot x\right)\right) \cdot \left(x \cdot \left(x \cdot \left(0.03125 + \left(x \cdot x\right) \cdot 0.0234375\right)\right) + 0.125\right)\right)
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
/-lowering-/.f64N/A
Simplified99.7%
flip-+N/A
div-subN/A
div-subN/A
--lowering--.f64N/A
Applied egg-rr99.7%
Taylor expanded in x around 0
*-lowering-*.f64N/A
cube-multN/A
unpow2N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6499.7%
Simplified99.7%
Final simplification99.7%
(FPCore (x)
:precision binary64
(-
0.0
(log
(/
x
(+ 2.0 (* x (* x (+ -0.5 (* (* x x) (+ -0.125 (* (* x x) -0.0625)))))))))))
double code(double x) {
return 0.0 - log((x / (2.0 + (x * (x * (-0.5 + ((x * x) * (-0.125 + ((x * x) * -0.0625)))))))));
}
real(8) function code(x)
real(8), intent (in) :: x
code = 0.0d0 - log((x / (2.0d0 + (x * (x * ((-0.5d0) + ((x * x) * ((-0.125d0) + ((x * x) * (-0.0625d0))))))))))
end function
public static double code(double x) {
return 0.0 - Math.log((x / (2.0 + (x * (x * (-0.5 + ((x * x) * (-0.125 + ((x * x) * -0.0625)))))))));
}
def code(x): return 0.0 - math.log((x / (2.0 + (x * (x * (-0.5 + ((x * x) * (-0.125 + ((x * x) * -0.0625)))))))))
function code(x) return Float64(0.0 - log(Float64(x / Float64(2.0 + Float64(x * Float64(x * Float64(-0.5 + Float64(Float64(x * x) * Float64(-0.125 + Float64(Float64(x * x) * -0.0625)))))))))) end
function tmp = code(x) tmp = 0.0 - log((x / (2.0 + (x * (x * (-0.5 + ((x * x) * (-0.125 + ((x * x) * -0.0625))))))))); end
code[x_] := N[(0.0 - N[Log[N[(x / N[(2.0 + N[(x * N[(x * N[(-0.5 + N[(N[(x * x), $MachinePrecision] * N[(-0.125 + N[(N[(x * x), $MachinePrecision] * -0.0625), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0 - \log \left(\frac{x}{2 + x \cdot \left(x \cdot \left(-0.5 + \left(x \cdot x\right) \cdot \left(-0.125 + \left(x \cdot x\right) \cdot -0.0625\right)\right)\right)}\right)
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
/-lowering-/.f64N/A
Simplified99.7%
clear-numN/A
log-recN/A
neg-lowering-neg.f64N/A
log-lowering-log.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
Applied egg-rr99.7%
Final simplification99.7%
(FPCore (x) :precision binary64 (log (/ (+ 2.0 (* (* x x) (+ -0.5 (* (* x x) (+ -0.125 (* (* x x) -0.0625)))))) x)))
double code(double x) {
return log(((2.0 + ((x * x) * (-0.5 + ((x * x) * (-0.125 + ((x * x) * -0.0625)))))) / x));
}
real(8) function code(x)
real(8), intent (in) :: x
code = log(((2.0d0 + ((x * x) * ((-0.5d0) + ((x * x) * ((-0.125d0) + ((x * x) * (-0.0625d0))))))) / x))
end function
public static double code(double x) {
return Math.log(((2.0 + ((x * x) * (-0.5 + ((x * x) * (-0.125 + ((x * x) * -0.0625)))))) / x));
}
def code(x): return math.log(((2.0 + ((x * x) * (-0.5 + ((x * x) * (-0.125 + ((x * x) * -0.0625)))))) / x))
function code(x) return log(Float64(Float64(2.0 + Float64(Float64(x * x) * Float64(-0.5 + Float64(Float64(x * x) * Float64(-0.125 + Float64(Float64(x * x) * -0.0625)))))) / x)) end
function tmp = code(x) tmp = log(((2.0 + ((x * x) * (-0.5 + ((x * x) * (-0.125 + ((x * x) * -0.0625)))))) / x)); end
code[x_] := N[Log[N[(N[(2.0 + N[(N[(x * x), $MachinePrecision] * N[(-0.5 + N[(N[(x * x), $MachinePrecision] * N[(-0.125 + N[(N[(x * x), $MachinePrecision] * -0.0625), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(\frac{2 + \left(x \cdot x\right) \cdot \left(-0.5 + \left(x \cdot x\right) \cdot \left(-0.125 + \left(x \cdot x\right) \cdot -0.0625\right)\right)}{x}\right)
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
/-lowering-/.f64N/A
Simplified99.7%
(FPCore (x) :precision binary64 (log (/ 1.0 (/ x (+ 2.0 (* x (* x (+ -0.5 (* (* x x) -0.125)))))))))
double code(double x) {
return log((1.0 / (x / (2.0 + (x * (x * (-0.5 + ((x * x) * -0.125))))))));
}
real(8) function code(x)
real(8), intent (in) :: x
code = log((1.0d0 / (x / (2.0d0 + (x * (x * ((-0.5d0) + ((x * x) * (-0.125d0)))))))))
end function
public static double code(double x) {
return Math.log((1.0 / (x / (2.0 + (x * (x * (-0.5 + ((x * x) * -0.125))))))));
}
def code(x): return math.log((1.0 / (x / (2.0 + (x * (x * (-0.5 + ((x * x) * -0.125))))))))
function code(x) return log(Float64(1.0 / Float64(x / Float64(2.0 + Float64(x * Float64(x * Float64(-0.5 + Float64(Float64(x * x) * -0.125)))))))) end
function tmp = code(x) tmp = log((1.0 / (x / (2.0 + (x * (x * (-0.5 + ((x * x) * -0.125)))))))); end
code[x_] := N[Log[N[(1.0 / N[(x / N[(2.0 + N[(x * N[(x * N[(-0.5 + N[(N[(x * x), $MachinePrecision] * -0.125), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(\frac{1}{\frac{x}{2 + x \cdot \left(x \cdot \left(-0.5 + \left(x \cdot x\right) \cdot -0.125\right)\right)}}\right)
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
/-lowering-/.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6499.6%
Simplified99.6%
clear-numN/A
/-lowering-/.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6499.6%
Applied egg-rr99.6%
(FPCore (x) :precision binary64 (- 0.0 (log (/ x (+ 2.0 (* x (* x (+ -0.5 (* (* x x) -0.125)))))))))
double code(double x) {
return 0.0 - log((x / (2.0 + (x * (x * (-0.5 + ((x * x) * -0.125)))))));
}
real(8) function code(x)
real(8), intent (in) :: x
code = 0.0d0 - log((x / (2.0d0 + (x * (x * ((-0.5d0) + ((x * x) * (-0.125d0))))))))
end function
public static double code(double x) {
return 0.0 - Math.log((x / (2.0 + (x * (x * (-0.5 + ((x * x) * -0.125)))))));
}
def code(x): return 0.0 - math.log((x / (2.0 + (x * (x * (-0.5 + ((x * x) * -0.125)))))))
function code(x) return Float64(0.0 - log(Float64(x / Float64(2.0 + Float64(x * Float64(x * Float64(-0.5 + Float64(Float64(x * x) * -0.125)))))))) end
function tmp = code(x) tmp = 0.0 - log((x / (2.0 + (x * (x * (-0.5 + ((x * x) * -0.125))))))); end
code[x_] := N[(0.0 - N[Log[N[(x / N[(2.0 + N[(x * N[(x * N[(-0.5 + N[(N[(x * x), $MachinePrecision] * -0.125), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
0 - \log \left(\frac{x}{2 + x \cdot \left(x \cdot \left(-0.5 + \left(x \cdot x\right) \cdot -0.125\right)\right)}\right)
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
/-lowering-/.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6499.6%
Simplified99.6%
clear-numN/A
log-recN/A
neg-lowering-neg.f64N/A
log-lowering-log.f64N/A
/-lowering-/.f64N/A
+-lowering-+.f64N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
*-lowering-*.f6499.6%
Applied egg-rr99.6%
Final simplification99.6%
(FPCore (x) :precision binary64 (log (/ (+ 2.0 (* (* x x) (+ -0.5 (* (* x x) -0.125)))) x)))
double code(double x) {
return log(((2.0 + ((x * x) * (-0.5 + ((x * x) * -0.125)))) / x));
}
real(8) function code(x)
real(8), intent (in) :: x
code = log(((2.0d0 + ((x * x) * ((-0.5d0) + ((x * x) * (-0.125d0))))) / x))
end function
public static double code(double x) {
return Math.log(((2.0 + ((x * x) * (-0.5 + ((x * x) * -0.125)))) / x));
}
def code(x): return math.log(((2.0 + ((x * x) * (-0.5 + ((x * x) * -0.125)))) / x))
function code(x) return log(Float64(Float64(2.0 + Float64(Float64(x * x) * Float64(-0.5 + Float64(Float64(x * x) * -0.125)))) / x)) end
function tmp = code(x) tmp = log(((2.0 + ((x * x) * (-0.5 + ((x * x) * -0.125)))) / x)); end
code[x_] := N[Log[N[(N[(2.0 + N[(N[(x * x), $MachinePrecision] * N[(-0.5 + N[(N[(x * x), $MachinePrecision] * -0.125), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(\frac{2 + \left(x \cdot x\right) \cdot \left(-0.5 + \left(x \cdot x\right) \cdot -0.125\right)}{x}\right)
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
/-lowering-/.f64N/A
+-lowering-+.f64N/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f64N/A
sub-negN/A
metadata-evalN/A
+-commutativeN/A
+-lowering-+.f64N/A
*-commutativeN/A
*-lowering-*.f64N/A
unpow2N/A
*-lowering-*.f6499.6%
Simplified99.6%
(FPCore (x) :precision binary64 (log (/ (+ 2.0 (* x (* x -0.5))) x)))
double code(double x) {
return log(((2.0 + (x * (x * -0.5))) / x));
}
real(8) function code(x)
real(8), intent (in) :: x
code = log(((2.0d0 + (x * (x * (-0.5d0)))) / x))
end function
public static double code(double x) {
return Math.log(((2.0 + (x * (x * -0.5))) / x));
}
def code(x): return math.log(((2.0 + (x * (x * -0.5))) / x))
function code(x) return log(Float64(Float64(2.0 + Float64(x * Float64(x * -0.5))) / x)) end
function tmp = code(x) tmp = log(((2.0 + (x * (x * -0.5))) / x)); end
code[x_] := N[Log[N[(N[(2.0 + N[(x * N[(x * -0.5), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / x), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(\frac{2 + x \cdot \left(x \cdot -0.5\right)}{x}\right)
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
/-lowering-/.f64N/A
+-lowering-+.f64N/A
*-commutativeN/A
unpow2N/A
associate-*l*N/A
*-lowering-*.f64N/A
*-lowering-*.f6499.5%
Simplified99.5%
(FPCore (x) :precision binary64 (log (/ 2.0 x)))
double code(double x) {
return log((2.0 / x));
}
real(8) function code(x)
real(8), intent (in) :: x
code = log((2.0d0 / x))
end function
public static double code(double x) {
return Math.log((2.0 / x));
}
def code(x):
    """Leading-order approximation near x = 0: log(2/x)."""
    ratio = 2.0 / x
    return math.log(ratio)
function code(x) return log(Float64(2.0 / x)) end
function tmp = code(x) tmp = log((2.0 / x)); end
code[x_] := N[Log[N[(2.0 / x), $MachinePrecision]], $MachinePrecision]
\begin{array}{l}
\\
\log \left(\frac{2}{x}\right)
\end{array}
Initial program 100.0%
Taylor expanded in x around 0
/-lowering-/.f6499.0%
Simplified99.0%
herbie shell --seed 2024152
(FPCore (x)
:name "Hyperbolic arc-(co)secant"
:precision binary64
(log (+ (/ 1.0 x) (/ (sqrt (- 1.0 (* x x))) x))))