
(FPCore (d h l M D) :precision binary64 (* (* (pow (/ d h) (/ 1.0 2.0)) (pow (/ d l) (/ 1.0 2.0))) (- 1.0 (* (* (/ 1.0 2.0) (pow (/ (* M D) (* 2.0 d)) 2.0)) (/ h l)))))
double code(double d, double h, double l, double M, double D) {
return (pow((d / h), (1.0 / 2.0)) * pow((d / l), (1.0 / 2.0))) * (1.0 - (((1.0 / 2.0) * pow(((M * D) / (2.0 * d)), 2.0)) * (h / l)));
}
! NaN-aware maximum/minimum overloads that mimic the C library fmax/fmin:
! when exactly one argument is NaN, the other argument is returned.
! (x /= x) is the portable NaN test used throughout.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! real(8)/real(8) maximum.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
if (x /= x) then
  res = y
else if (y /= y) then
  res = x
else
  res = max(x, y)
end if
end function
! real(4)/real(4) maximum.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
if (x /= x) then
  res = y
else if (y /= y) then
  res = x
else
  res = max(x, y)
end if
end function
! real(8)/real(4) maximum; the real(4) argument is widened to real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
if (x /= x) then
  res = dble(y)
else if (y /= y) then
  res = x
else
  res = max(x, dble(y))
end if
end function
! real(4)/real(8) maximum; the real(4) argument is widened to real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
if (x /= x) then
  res = y
else if (y /= y) then
  res = dble(x)
else
  res = max(dble(x), y)
end if
end function
! real(8)/real(8) minimum.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
if (x /= x) then
  res = y
else if (y /= y) then
  res = x
else
  res = min(x, y)
end if
end function
! real(4)/real(4) minimum.
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
if (x /= x) then
  res = y
else if (y /= y) then
  res = x
else
  res = min(x, y)
end if
end function
! real(8)/real(4) minimum; the real(4) argument is widened to real(8).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
if (x /= x) then
  res = dble(y)
else if (y /= y) then
  res = x
else
  res = min(x, dble(y))
end if
end function
! real(4)/real(8) minimum; the real(4) argument is widened to real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
if (x /= x) then
  res = y
else if (y /= y) then
  res = dble(x)
else
  res = min(dble(x), y)
end if
end function
end module
!> Evaluate sqrt(d/h) * sqrt(d/l) * (1 - (1/2)*((m*d_1)/(2*d))**2 * (h/l))
!> in binary64. The FPCore argument D is renamed d_1 here because Fortran
!> identifiers are case-insensitive (d and D would collide).
!> Fix: added implicit none (the original relied on implicit typing rules
!> being harmless; all arguments are declared, so behavior is unchanged).
real(8) function code(d, h, l, m, d_1)
use fmin_fmax_functions
implicit none
real(8), intent (in) :: d
real(8), intent (in) :: h
real(8), intent (in) :: l
real(8), intent (in) :: m
real(8), intent (in) :: d_1
code = (((d / h) ** (1.0d0 / 2.0d0)) * ((d / l) ** (1.0d0 / 2.0d0))) * (1.0d0 - (((1.0d0 / 2.0d0) * (((m * d_1) / (2.0d0 * d)) ** 2.0d0)) * (h / l)))
end function
// Evaluates Math.pow(d/h, 1/2) * Math.pow(d/l, 1/2) times the correction
// 1 - (1/2) * ((M*D)/(2*d))^2 * (h/l). Identical operations and order to
// the original one-liner, split into named intermediates.
public static double code(double d, double h, double l, double M, double D) {
    double ampH = Math.pow((d / h), (1.0 / 2.0));
    double ampL = Math.pow((d / l), (1.0 / 2.0));
    double ratio = (M * D) / (2.0 * d);
    double correction = 1.0 - (((1.0 / 2.0) * Math.pow(ratio, 2.0)) * (h / l));
    return (ampH * ampL) * correction;
}
def code(d, h, l, M, D):
    """sqrt(d/h) * sqrt(d/l) * (1 - (1/2)*((M*D)/(2*d))**2 * (h/l)).

    Same math.pow-based operations and evaluation order as the original
    one-liner, split across named intermediates.
    """
    amp = math.pow((d / h), (1.0 / 2.0)) * math.pow((d / l), (1.0 / 2.0))
    correction = 1.0 - (((1.0 / 2.0) * math.pow(((M * D) / (2.0 * d)), 2.0)) * (h / l))
    return amp * correction
# sqrt(d/h) * sqrt(d/l) * (1 - (1/2)*((M*D)/(2d))^2 * (h/l)).
# Every intermediate is explicitly narrowed with Float64(...) exactly as in
# the original single-line form; only the layout and local names differ.
function code(d, h, l, M, D)
    amp = Float64((Float64(d / h) ^ Float64(1.0 / 2.0)) * (Float64(d / l) ^ Float64(1.0 / 2.0)))
    corr = Float64(1.0 - Float64(Float64(Float64(1.0 / 2.0) * (Float64(Float64(M * D) / Float64(2.0 * d)) ^ 2.0)) * Float64(h / l)))
    return Float64(amp * corr)
end
% sqrt(d/h) * sqrt(d/l) * (1 - (1/2)*((M*D)/(2*d))^2 * (h/l)).
% Same operations and order as the original one-liner, reformatted.
function tmp = code(d, h, l, M, D)
  amp = ((d / h) ^ (1.0 / 2.0)) * ((d / l) ^ (1.0 / 2.0));
  corr = 1.0 - (((1.0 / 2.0) * (((M * D) / (2.0 * d)) ^ 2.0)) * (h / l));
  tmp = amp * corr;
end
(* sqrt(d/h)*sqrt(d/l)*(1 - (1/2)*((M*D)/(2*d))^2*(h/l)); every intermediate
   result is explicitly rounded to $MachinePrecision via N[...] to emulate
   binary64 rounding at each step. *)
code[d_, h_, l_, M_, D_] := N[(N[(N[Power[N[(d / h), $MachinePrecision], N[(1.0 / 2.0), $MachinePrecision]], $MachinePrecision] * N[Power[N[(d / l), $MachinePrecision], N[(1.0 / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[(1.0 - N[(N[(N[(1.0 / 2.0), $MachinePrecision] * N[Power[N[(N[(M * D), $MachinePrecision] / N[(2.0 * d), $MachinePrecision]), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] * N[(h / l), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left({\left(\frac{d}{h}\right)}^{\left(\frac{1}{2}\right)} \cdot {\left(\frac{d}{\ell}\right)}^{\left(\frac{1}{2}\right)}\right) \cdot \left(1 - \left(\frac{1}{2} \cdot {\left(\frac{M \cdot D}{2 \cdot d}\right)}^{2}\right) \cdot \frac{h}{\ell}\right)
\end{array}
Sampling outcomes in binary64 precision:
Herbie found 13 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (d h l M D) :precision binary64 (* (* (pow (/ d h) (/ 1.0 2.0)) (pow (/ d l) (/ 1.0 2.0))) (- 1.0 (* (* (/ 1.0 2.0) (pow (/ (* M D) (* 2.0 d)) 2.0)) (/ h l)))))
/* Duplicate listing of the original binary64 program (this generated report
   repeats the support code for each alternative):
   sqrt(d/h)*sqrt(d/l) * (1 - (1/2)*((M*D)/(2*d))^2 * (h/l)). */
double code(double d, double h, double l, double M, double D) {
return (pow((d / h), (1.0 / 2.0)) * pow((d / l), (1.0 / 2.0))) * (1.0 - (((1.0 / 2.0) * pow(((M * D) / (2.0 * d)), 2.0)) * (h / l)));
}
! Duplicate listing of the fmax/fmin support module (the generated report
! repeats it for each alternative). The overloads mimic C's fmax/fmin:
! when exactly one argument is NaN, the other argument is returned.
! merge(t, f, mask) selects t when mask is true; (x /= x) tests for NaN.
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! real(8)/real(8) maximum: NaN x -> y, NaN y -> x, else max.
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! real(4)/real(4) maximum.
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! real(8)/real(4) maximum; real(4) operand widened with dble().
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! real(4)/real(8) maximum; real(4) operand widened with dble().
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! real(8)/real(8) minimum: NaN x -> y, NaN y -> x, else min.
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! real(4)/real(4) minimum.
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! real(8)/real(4) minimum; real(4) operand widened with dble().
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! real(4)/real(8) minimum; real(4) operand widened with dble().
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Duplicate listing of the Fortran target: evaluates
! sqrt(d/h)*sqrt(d/l) * (1 - (1/2)*((m*d_1)/(2*d))**2 * (h/l)).
! The FPCore argument D is renamed d_1 because Fortran identifiers are
! case-insensitive (d and D would collide).
real(8) function code(d, h, l, m, d_1)
use fmin_fmax_functions
real(8), intent (in) :: d
real(8), intent (in) :: h
real(8), intent (in) :: l
real(8), intent (in) :: m
real(8), intent (in) :: d_1
code = (((d / h) ** (1.0d0 / 2.0d0)) * ((d / l) ** (1.0d0 / 2.0d0))) * (1.0d0 - (((1.0d0 / 2.0d0) * (((m * d_1) / (2.0d0 * d)) ** 2.0d0)) * (h / l)))
end function
// Duplicate listing of the original Java rendering:
// sqrt(d/h)*sqrt(d/l) * (1 - (1/2)*((M*D)/(2*d))^2 * (h/l)).
public static double code(double d, double h, double l, double M, double D) {
return (Math.pow((d / h), (1.0 / 2.0)) * Math.pow((d / l), (1.0 / 2.0))) * (1.0 - (((1.0 / 2.0) * Math.pow(((M * D) / (2.0 * d)), 2.0)) * (h / l)));
}
# Duplicate listing of the original Python rendering:
# sqrt(d/h)*sqrt(d/l) * (1 - (1/2)*((M*D)/(2*d))**2 * (h/l)).
def code(d, h, l, M, D): return (math.pow((d / h), (1.0 / 2.0)) * math.pow((d / l), (1.0 / 2.0))) * (1.0 - (((1.0 / 2.0) * math.pow(((M * D) / (2.0 * d)), 2.0)) * (h / l)))
# Duplicate listing of the original Julia rendering; each intermediate is
# narrowed with Float64(...) to emulate step-by-step binary64 rounding.
function code(d, h, l, M, D) return Float64(Float64((Float64(d / h) ^ Float64(1.0 / 2.0)) * (Float64(d / l) ^ Float64(1.0 / 2.0))) * Float64(1.0 - Float64(Float64(Float64(1.0 / 2.0) * (Float64(Float64(M * D) / Float64(2.0 * d)) ^ 2.0)) * Float64(h / l)))) end
% Duplicate listing of the original MATLAB rendering:
% sqrt(d/h)*sqrt(d/l) * (1 - (1/2)*((M*D)/(2*d))^2 * (h/l)).
function tmp = code(d, h, l, M, D) tmp = (((d / h) ^ (1.0 / 2.0)) * ((d / l) ^ (1.0 / 2.0))) * (1.0 - (((1.0 / 2.0) * (((M * D) / (2.0 * d)) ^ 2.0)) * (h / l))); end
(* Duplicate listing of the original Mathematica rendering; every intermediate
   is rounded to $MachinePrecision via N[...] to emulate binary64. *)
code[d_, h_, l_, M_, D_] := N[(N[(N[Power[N[(d / h), $MachinePrecision], N[(1.0 / 2.0), $MachinePrecision]], $MachinePrecision] * N[Power[N[(d / l), $MachinePrecision], N[(1.0 / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * N[(1.0 - N[(N[(N[(1.0 / 2.0), $MachinePrecision] * N[Power[N[(N[(M * D), $MachinePrecision] / N[(2.0 * d), $MachinePrecision]), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] * N[(h / l), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left({\left(\frac{d}{h}\right)}^{\left(\frac{1}{2}\right)} \cdot {\left(\frac{d}{\ell}\right)}^{\left(\frac{1}{2}\right)}\right) \cdot \left(1 - \left(\frac{1}{2} \cdot {\left(\frac{M \cdot D}{2 \cdot d}\right)}^{2}\right) \cdot \frac{h}{\ell}\right)
\end{array}
(FPCore (d h l M D)
:precision binary64
(let* ((t_0
(- 1.0 (* (* (/ 1.0 2.0) (pow (/ (* M D) (* 2.0 d)) 2.0)) (/ h l)))))
(if (<= d -1.05e-162)
(* (* -1.0 (* d (pow (pow (* l h) -1.0) 0.5))) t_0)
(if (<= d 3.2e-285)
(/
(fma
(* -0.125 (/ (pow (* D M) 2.0) d))
(pow (/ h l) 1.5)
(* (pow (/ h l) 0.5) d))
h)
(* (* (/ (sqrt d) (sqrt h)) (pow (/ d l) (/ 1.0 2.0))) t_0)))))
double code(double d, double h, double l, double M, double D) {
double t_0 = 1.0 - (((1.0 / 2.0) * pow(((M * D) / (2.0 * d)), 2.0)) * (h / l));
double tmp;
if (d <= -1.05e-162) {
tmp = (-1.0 * (d * pow(pow((l * h), -1.0), 0.5))) * t_0;
} else if (d <= 3.2e-285) {
tmp = fma((-0.125 * (pow((D * M), 2.0) / d)), pow((h / l), 1.5), (pow((h / l), 0.5) * d)) / h;
} else {
tmp = ((sqrt(d) / sqrt(h)) * pow((d / l), (1.0 / 2.0))) * t_0;
}
return tmp;
}
# Julia rendering of the regime-split alternative above: branch thresholds on d
# were chosen by Herbie; Float64(...) wrappers emulate per-step binary64 rounding.
function code(d, h, l, M, D) t_0 = Float64(1.0 - Float64(Float64(Float64(1.0 / 2.0) * (Float64(Float64(M * D) / Float64(2.0 * d)) ^ 2.0)) * Float64(h / l))) tmp = 0.0 if (d <= -1.05e-162) tmp = Float64(Float64(-1.0 * Float64(d * ((Float64(l * h) ^ -1.0) ^ 0.5))) * t_0); elseif (d <= 3.2e-285) tmp = Float64(fma(Float64(-0.125 * Float64((Float64(D * M) ^ 2.0) / d)), (Float64(h / l) ^ 1.5), Float64((Float64(h / l) ^ 0.5) * d)) / h); else tmp = Float64(Float64(Float64(sqrt(d) / sqrt(h)) * (Float64(d / l) ^ Float64(1.0 / 2.0))) * t_0); end return tmp end
code[d_, h_, l_, M_, D_] := Block[{t$95$0 = N[(1.0 - N[(N[(N[(1.0 / 2.0), $MachinePrecision] * N[Power[N[(N[(M * D), $MachinePrecision] / N[(2.0 * d), $MachinePrecision]), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] * N[(h / l), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[d, -1.05e-162], N[(N[(-1.0 * N[(d * N[Power[N[Power[N[(l * h), $MachinePrecision], -1.0], $MachinePrecision], 0.5], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * t$95$0), $MachinePrecision], If[LessEqual[d, 3.2e-285], N[(N[(N[(-0.125 * N[(N[Power[N[(D * M), $MachinePrecision], 2.0], $MachinePrecision] / d), $MachinePrecision]), $MachinePrecision] * N[Power[N[(h / l), $MachinePrecision], 1.5], $MachinePrecision] + N[(N[Power[N[(h / l), $MachinePrecision], 0.5], $MachinePrecision] * d), $MachinePrecision]), $MachinePrecision] / h), $MachinePrecision], N[(N[(N[(N[Sqrt[d], $MachinePrecision] / N[Sqrt[h], $MachinePrecision]), $MachinePrecision] * N[Power[N[(d / l), $MachinePrecision], N[(1.0 / 2.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] * t$95$0), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := 1 - \left(\frac{1}{2} \cdot {\left(\frac{M \cdot D}{2 \cdot d}\right)}^{2}\right) \cdot \frac{h}{\ell}\\
\mathbf{if}\;d \leq -1.05 \cdot 10^{-162}:\\
\;\;\;\;\left(-1 \cdot \left(d \cdot {\left({\left(\ell \cdot h\right)}^{-1}\right)}^{0.5}\right)\right) \cdot t\_0\\
\mathbf{elif}\;d \leq 3.2 \cdot 10^{-285}:\\
\;\;\;\;\frac{\mathsf{fma}\left(-0.125 \cdot \frac{{\left(D \cdot M\right)}^{2}}{d}, {\left(\frac{h}{\ell}\right)}^{1.5}, {\left(\frac{h}{\ell}\right)}^{0.5} \cdot d\right)}{h}\\
\mathbf{else}:\\
\;\;\;\;\left(\frac{\sqrt{d}}{\sqrt{h}} \cdot {\left(\frac{d}{\ell}\right)}^{\left(\frac{1}{2}\right)}\right) \cdot t\_0\\
\end{array}
\end{array}
if d < -1.05e-162Initial program 76.8%
Taylor expanded in h around -inf
lower-*.f64N/A
sqrt-pow2N/A
metadata-evalN/A
metadata-evalN/A
*-commutativeN/A
lower-*.f64N/A
pow1/2N/A
metadata-evalN/A
lift-/.f64N/A
lower-pow.f64N/A
inv-powN/A
lower-pow.f64N/A
*-commutativeN/A
lower-*.f6478.7
lift-/.f64N/A
metadata-eval78.7
Applied rewrites78.7%
if -1.05e-162 < d < 3.20000000000000016e-285Initial program 32.8%
Taylor expanded in h around 0
lower-/.f64N/A
Applied rewrites62.2%
lift-pow.f64N/A
lift-/.f64N/A
lift-pow.f64N/A
pow-powN/A
lower-pow.f64N/A
lift-/.f64N/A
metadata-eval64.6
Applied rewrites64.6%
if 3.20000000000000016e-285 < d Initial program 71.5%
lift-/.f64N/A
lift-pow.f64N/A
lift-/.f64N/A
metadata-evalN/A
pow1/2N/A
sqrt-divN/A
lower-/.f64N/A
lower-sqrt.f64N/A
lower-sqrt.f6482.6
Applied rewrites82.6%
Final simplification77.8%
(FPCore (d h l M D)
:precision binary64
(let* ((t_0 (pow (/ d l) (/ 1.0 2.0)))
(t_1
(- 1.0 (* (* (/ 1.0 2.0) (pow (/ (* M D) (* 2.0 d)) 2.0)) (/ h l))))
(t_2 (pow (/ d h) 0.125)))
(if (<= (* (* (pow (/ d h) (/ 1.0 2.0)) t_0) t_1) 5e+236)
(* (* (* (pow (/ d h) 0.25) (* t_2 t_2)) t_0) t_1)
(/
(fma
(* -0.125 (/ (pow (* D M) 2.0) d))
(pow (/ h l) 1.5)
(* (pow (/ h l) 0.5) d))
h))))
/* Herbie alternative: when the plain product stays below 5e+236, pow(d/h, 1/2)
   is factored as pow(d/h, 0.25) * pow(d/h, 0.125)^2 (overflow-resistant
   factoring); otherwise an fma-based Taylor form in h is used. The 5e+236
   threshold comes from Herbie's regime inference. Note the guard itself
   evaluates the straightforward product. */
double code(double d, double h, double l, double M, double D) {
double t_0 = pow((d / l), (1.0 / 2.0));
double t_1 = 1.0 - (((1.0 / 2.0) * pow(((M * D) / (2.0 * d)), 2.0)) * (h / l));
double t_2 = pow((d / h), 0.125);
double tmp;
if (((pow((d / h), (1.0 / 2.0)) * t_0) * t_1) <= 5e+236) {
tmp = ((pow((d / h), 0.25) * (t_2 * t_2)) * t_0) * t_1;
} else {
tmp = fma((-0.125 * (pow((D * M), 2.0) / d)), pow((h / l), 1.5), (pow((h / l), 0.5) * d)) / h;
}
return tmp;
}
function code(d, h, l, M, D) t_0 = Float64(d / l) ^ Float64(1.0 / 2.0) t_1 = Float64(1.0 - Float64(Float64(Float64(1.0 / 2.0) * (Float64(Float64(M * D) / Float64(2.0 * d)) ^ 2.0)) * Float64(h / l))) t_2 = Float64(d / h) ^ 0.125 tmp = 0.0 if (Float64(Float64((Float64(d / h) ^ Float64(1.0 / 2.0)) * t_0) * t_1) <= 5e+236) tmp = Float64(Float64(Float64((Float64(d / h) ^ 0.25) * Float64(t_2 * t_2)) * t_0) * t_1); else tmp = Float64(fma(Float64(-0.125 * Float64((Float64(D * M) ^ 2.0) / d)), (Float64(h / l) ^ 1.5), Float64((Float64(h / l) ^ 0.5) * d)) / h); end return tmp end
code[d_, h_, l_, M_, D_] := Block[{t$95$0 = N[Power[N[(d / l), $MachinePrecision], N[(1.0 / 2.0), $MachinePrecision]], $MachinePrecision]}, Block[{t$95$1 = N[(1.0 - N[(N[(N[(1.0 / 2.0), $MachinePrecision] * N[Power[N[(N[(M * D), $MachinePrecision] / N[(2.0 * d), $MachinePrecision]), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] * N[(h / l), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[Power[N[(d / h), $MachinePrecision], 0.125], $MachinePrecision]}, If[LessEqual[N[(N[(N[Power[N[(d / h), $MachinePrecision], N[(1.0 / 2.0), $MachinePrecision]], $MachinePrecision] * t$95$0), $MachinePrecision] * t$95$1), $MachinePrecision], 5e+236], N[(N[(N[(N[Power[N[(d / h), $MachinePrecision], 0.25], $MachinePrecision] * N[(t$95$2 * t$95$2), $MachinePrecision]), $MachinePrecision] * t$95$0), $MachinePrecision] * t$95$1), $MachinePrecision], N[(N[(N[(-0.125 * N[(N[Power[N[(D * M), $MachinePrecision], 2.0], $MachinePrecision] / d), $MachinePrecision]), $MachinePrecision] * N[Power[N[(h / l), $MachinePrecision], 1.5], $MachinePrecision] + N[(N[Power[N[(h / l), $MachinePrecision], 0.5], $MachinePrecision] * d), $MachinePrecision]), $MachinePrecision] / h), $MachinePrecision]]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := {\left(\frac{d}{\ell}\right)}^{\left(\frac{1}{2}\right)}\\
t_1 := 1 - \left(\frac{1}{2} \cdot {\left(\frac{M \cdot D}{2 \cdot d}\right)}^{2}\right) \cdot \frac{h}{\ell}\\
t_2 := {\left(\frac{d}{h}\right)}^{0.125}\\
\mathbf{if}\;\left({\left(\frac{d}{h}\right)}^{\left(\frac{1}{2}\right)} \cdot t\_0\right) \cdot t\_1 \leq 5 \cdot 10^{+236}:\\
\;\;\;\;\left(\left({\left(\frac{d}{h}\right)}^{0.25} \cdot \left(t\_2 \cdot t\_2\right)\right) \cdot t\_0\right) \cdot t\_1\\
\mathbf{else}:\\
\;\;\;\;\frac{\mathsf{fma}\left(-0.125 \cdot \frac{{\left(D \cdot M\right)}^{2}}{d}, {\left(\frac{h}{\ell}\right)}^{1.5}, {\left(\frac{h}{\ell}\right)}^{0.5} \cdot d\right)}{h}\\
\end{array}
\end{array}
if (*.f64 (*.f64 (pow.f64 (/.f64 d h) (/.f64 #s(literal 1 binary64) #s(literal 2 binary64))) (pow.f64 (/.f64 d l) (/.f64 #s(literal 1 binary64) #s(literal 2 binary64)))) (-.f64 #s(literal 1 binary64) (*.f64 (*.f64 (/.f64 #s(literal 1 binary64) #s(literal 2 binary64)) (pow.f64 (/.f64 (*.f64 M D) (*.f64 #s(literal 2 binary64) d)) #s(literal 2 binary64))) (/.f64 h l)))) < 4.9999999999999997e236Initial program 87.9%
lift-/.f64N/A
lift-pow.f64N/A
sqr-powN/A
lower-*.f64N/A
lift-/.f64N/A
metadata-evalN/A
metadata-evalN/A
lower-pow.f64N/A
lift-/.f64N/A
lift-/.f64N/A
metadata-evalN/A
metadata-evalN/A
lower-pow.f64N/A
lift-/.f6487.7
Applied rewrites87.7%
lift-/.f64N/A
lift-pow.f64N/A
sqr-powN/A
lower-*.f64N/A
metadata-evalN/A
lower-pow.f64N/A
lift-/.f64N/A
metadata-evalN/A
lower-pow.f64N/A
lift-/.f6487.7
Applied rewrites87.7%
if 4.9999999999999997e236 < (*.f64 (*.f64 (pow.f64 (/.f64 d h) (/.f64 #s(literal 1 binary64) #s(literal 2 binary64))) (pow.f64 (/.f64 d l) (/.f64 #s(literal 1 binary64) #s(literal 2 binary64)))) (-.f64 #s(literal 1 binary64) (*.f64 (*.f64 (/.f64 #s(literal 1 binary64) #s(literal 2 binary64)) (pow.f64 (/.f64 (*.f64 M D) (*.f64 #s(literal 2 binary64) d)) #s(literal 2 binary64))) (/.f64 h l)))) Initial program 25.2%
Taylor expanded in h around 0
lower-/.f64N/A
Applied rewrites42.4%
lift-pow.f64N/A
lift-/.f64N/A
lift-pow.f64N/A
pow-powN/A
lower-pow.f64N/A
lift-/.f64N/A
metadata-eval48.2
Applied rewrites48.2%
(FPCore (d h l M D)
:precision binary64
(let* ((t_0 (* -0.125 (/ (pow (* D M) 2.0) d)))
(t_1 (pow (pow (* l h) -1.0) 0.5)))
(if (<= h -1.05e-297)
(*
(* -1.0 (* d t_1))
(- 1.0 (* (* (/ 1.0 2.0) (pow (/ (* M D) (* 2.0 d)) 2.0)) (/ h l))))
(if (<= h 6.8e-149)
(* t_1 d)
(if (<= h 2.8e+171)
(/ (fma t_0 (pow (/ h l) 1.5) (* (pow (/ h l) 0.5) d)) h)
(fma
t_0
(pow (/ h (pow l 3.0)) 0.5)
(* (pow (exp (* (log (* l h)) -1.0)) 0.5) d)))))))
/* Herbie alternative branching on h (thresholds from Herbie's regime
   inference, per the derivation log below):
   h <= -1.05e-297 : sign-factored 1/sqrt(l*h) form times the full correction;
   h <= 6.8e-149   : correction dropped (Taylor expansion in d around +inf);
   h <= 2.8e+171   : fma-based Taylor form in h around 0;
   otherwise       : exp(-log(l*h)) factoring of 1/sqrt(l*h) to avoid overflow. */
double code(double d, double h, double l, double M, double D) {
double t_0 = -0.125 * (pow((D * M), 2.0) / d);
double t_1 = pow(pow((l * h), -1.0), 0.5);
double tmp;
if (h <= -1.05e-297) {
tmp = (-1.0 * (d * t_1)) * (1.0 - (((1.0 / 2.0) * pow(((M * D) / (2.0 * d)), 2.0)) * (h / l)));
} else if (h <= 6.8e-149) {
tmp = t_1 * d;
} else if (h <= 2.8e+171) {
tmp = fma(t_0, pow((h / l), 1.5), (pow((h / l), 0.5) * d)) / h;
} else {
tmp = fma(t_0, pow((h / pow(l, 3.0)), 0.5), (pow(exp((log((l * h)) * -1.0)), 0.5) * d));
}
return tmp;
}
function code(d, h, l, M, D) t_0 = Float64(-0.125 * Float64((Float64(D * M) ^ 2.0) / d)) t_1 = (Float64(l * h) ^ -1.0) ^ 0.5 tmp = 0.0 if (h <= -1.05e-297) tmp = Float64(Float64(-1.0 * Float64(d * t_1)) * Float64(1.0 - Float64(Float64(Float64(1.0 / 2.0) * (Float64(Float64(M * D) / Float64(2.0 * d)) ^ 2.0)) * Float64(h / l)))); elseif (h <= 6.8e-149) tmp = Float64(t_1 * d); elseif (h <= 2.8e+171) tmp = Float64(fma(t_0, (Float64(h / l) ^ 1.5), Float64((Float64(h / l) ^ 0.5) * d)) / h); else tmp = fma(t_0, (Float64(h / (l ^ 3.0)) ^ 0.5), Float64((exp(Float64(log(Float64(l * h)) * -1.0)) ^ 0.5) * d)); end return tmp end
code[d_, h_, l_, M_, D_] := Block[{t$95$0 = N[(-0.125 * N[(N[Power[N[(D * M), $MachinePrecision], 2.0], $MachinePrecision] / d), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[Power[N[Power[N[(l * h), $MachinePrecision], -1.0], $MachinePrecision], 0.5], $MachinePrecision]}, If[LessEqual[h, -1.05e-297], N[(N[(-1.0 * N[(d * t$95$1), $MachinePrecision]), $MachinePrecision] * N[(1.0 - N[(N[(N[(1.0 / 2.0), $MachinePrecision] * N[Power[N[(N[(M * D), $MachinePrecision] / N[(2.0 * d), $MachinePrecision]), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] * N[(h / l), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[h, 6.8e-149], N[(t$95$1 * d), $MachinePrecision], If[LessEqual[h, 2.8e+171], N[(N[(t$95$0 * N[Power[N[(h / l), $MachinePrecision], 1.5], $MachinePrecision] + N[(N[Power[N[(h / l), $MachinePrecision], 0.5], $MachinePrecision] * d), $MachinePrecision]), $MachinePrecision] / h), $MachinePrecision], N[(t$95$0 * N[Power[N[(h / N[Power[l, 3.0], $MachinePrecision]), $MachinePrecision], 0.5], $MachinePrecision] + N[(N[Power[N[Exp[N[(N[Log[N[(l * h), $MachinePrecision]], $MachinePrecision] * -1.0), $MachinePrecision]], $MachinePrecision], 0.5], $MachinePrecision] * d), $MachinePrecision]), $MachinePrecision]]]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := -0.125 \cdot \frac{{\left(D \cdot M\right)}^{2}}{d}\\
t_1 := {\left({\left(\ell \cdot h\right)}^{-1}\right)}^{0.5}\\
\mathbf{if}\;h \leq -1.05 \cdot 10^{-297}:\\
\;\;\;\;\left(-1 \cdot \left(d \cdot t\_1\right)\right) \cdot \left(1 - \left(\frac{1}{2} \cdot {\left(\frac{M \cdot D}{2 \cdot d}\right)}^{2}\right) \cdot \frac{h}{\ell}\right)\\
\mathbf{elif}\;h \leq 6.8 \cdot 10^{-149}:\\
\;\;\;\;t\_1 \cdot d\\
\mathbf{elif}\;h \leq 2.8 \cdot 10^{+171}:\\
\;\;\;\;\frac{\mathsf{fma}\left(t\_0, {\left(\frac{h}{\ell}\right)}^{1.5}, {\left(\frac{h}{\ell}\right)}^{0.5} \cdot d\right)}{h}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(t\_0, {\left(\frac{h}{{\ell}^{3}}\right)}^{0.5}, {\left(e^{\log \left(\ell \cdot h\right) \cdot -1}\right)}^{0.5} \cdot d\right)\\
\end{array}
\end{array}
if h < -1.05000000000000007e-297Initial program 64.4%
Taylor expanded in h around -inf
lower-*.f64N/A
sqrt-pow2N/A
metadata-evalN/A
metadata-evalN/A
*-commutativeN/A
lower-*.f64N/A
pow1/2N/A
metadata-evalN/A
lift-/.f64N/A
lower-pow.f64N/A
inv-powN/A
lower-pow.f64N/A
*-commutativeN/A
lower-*.f6471.0
lift-/.f64N/A
metadata-eval71.0
Applied rewrites71.0%
if -1.05000000000000007e-297 < h < 6.7999999999999998e-149Initial program 66.5%
Taylor expanded in d around inf
*-commutativeN/A
lower-*.f64N/A
pow1/2N/A
metadata-evalN/A
lift-/.f64N/A
lower-pow.f64N/A
inv-powN/A
lower-pow.f64N/A
*-commutativeN/A
lower-*.f6472.0
lift-/.f64N/A
metadata-eval72.0
Applied rewrites72.0%
if 6.7999999999999998e-149 < h < 2.80000000000000004e171Initial program 74.2%
Taylor expanded in h around 0
lower-/.f64N/A
Applied rewrites58.9%
lift-pow.f64N/A
lift-/.f64N/A
lift-pow.f64N/A
pow-powN/A
lower-pow.f64N/A
lift-/.f64N/A
metadata-eval74.0
Applied rewrites74.0%
if 2.80000000000000004e171 < h Initial program 58.1%
Taylor expanded in l around inf
associate-*r*N/A
lower-fma.f64N/A
Applied rewrites66.3%
lift-*.f64N/A
lift-pow.f64N/A
pow-to-expN/A
lower-exp.f64N/A
lower-*.f64N/A
lower-log.f64N/A
lift-*.f6466.3
Applied rewrites66.3%
Final simplification71.3%
(FPCore (d h l M D)
:precision binary64
(let* ((t_0 (pow (* l h) -1.0))
(t_1 (* -0.125 (/ (pow (* D M) 2.0) d)))
(t_2 (pow t_0 0.25)))
(if (<= h -1.05e-297)
(*
(* (* -1.0 d) (* t_2 t_2))
(- 1.0 (* (* (/ 1.0 2.0) (pow (/ (* M D) (* 2.0 d)) 2.0)) (/ h l))))
(if (<= h 6.8e-149)
(* (pow t_0 0.5) d)
(if (<= h 2.8e+171)
(/ (fma t_1 (pow (/ h l) 1.5) (* (pow (/ h l) 0.5) d)) h)
(fma
t_1
(pow (/ h (pow l 3.0)) 0.5)
(* (pow (exp (* (log (* l h)) -1.0)) 0.5) d)))))))
/* Herbie alternative branching on h — same regimes as the previous variant,
   but 1/(l*h) is hoisted into t_0 and its square root expressed as the
   square of the fourth root (t_2 * t_2). Thresholds from Herbie's regime
   inference. */
double code(double d, double h, double l, double M, double D) {
double t_0 = pow((l * h), -1.0);
double t_1 = -0.125 * (pow((D * M), 2.0) / d);
double t_2 = pow(t_0, 0.25);
double tmp;
if (h <= -1.05e-297) {
tmp = ((-1.0 * d) * (t_2 * t_2)) * (1.0 - (((1.0 / 2.0) * pow(((M * D) / (2.0 * d)), 2.0)) * (h / l)));
} else if (h <= 6.8e-149) {
tmp = pow(t_0, 0.5) * d;
} else if (h <= 2.8e+171) {
tmp = fma(t_1, pow((h / l), 1.5), (pow((h / l), 0.5) * d)) / h;
} else {
tmp = fma(t_1, pow((h / pow(l, 3.0)), 0.5), (pow(exp((log((l * h)) * -1.0)), 0.5) * d));
}
return tmp;
}
function code(d, h, l, M, D) t_0 = Float64(l * h) ^ -1.0 t_1 = Float64(-0.125 * Float64((Float64(D * M) ^ 2.0) / d)) t_2 = t_0 ^ 0.25 tmp = 0.0 if (h <= -1.05e-297) tmp = Float64(Float64(Float64(-1.0 * d) * Float64(t_2 * t_2)) * Float64(1.0 - Float64(Float64(Float64(1.0 / 2.0) * (Float64(Float64(M * D) / Float64(2.0 * d)) ^ 2.0)) * Float64(h / l)))); elseif (h <= 6.8e-149) tmp = Float64((t_0 ^ 0.5) * d); elseif (h <= 2.8e+171) tmp = Float64(fma(t_1, (Float64(h / l) ^ 1.5), Float64((Float64(h / l) ^ 0.5) * d)) / h); else tmp = fma(t_1, (Float64(h / (l ^ 3.0)) ^ 0.5), Float64((exp(Float64(log(Float64(l * h)) * -1.0)) ^ 0.5) * d)); end return tmp end
code[d_, h_, l_, M_, D_] := Block[{t$95$0 = N[Power[N[(l * h), $MachinePrecision], -1.0], $MachinePrecision]}, Block[{t$95$1 = N[(-0.125 * N[(N[Power[N[(D * M), $MachinePrecision], 2.0], $MachinePrecision] / d), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$2 = N[Power[t$95$0, 0.25], $MachinePrecision]}, If[LessEqual[h, -1.05e-297], N[(N[(N[(-1.0 * d), $MachinePrecision] * N[(t$95$2 * t$95$2), $MachinePrecision]), $MachinePrecision] * N[(1.0 - N[(N[(N[(1.0 / 2.0), $MachinePrecision] * N[Power[N[(N[(M * D), $MachinePrecision] / N[(2.0 * d), $MachinePrecision]), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision] * N[(h / l), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[h, 6.8e-149], N[(N[Power[t$95$0, 0.5], $MachinePrecision] * d), $MachinePrecision], If[LessEqual[h, 2.8e+171], N[(N[(t$95$1 * N[Power[N[(h / l), $MachinePrecision], 1.5], $MachinePrecision] + N[(N[Power[N[(h / l), $MachinePrecision], 0.5], $MachinePrecision] * d), $MachinePrecision]), $MachinePrecision] / h), $MachinePrecision], N[(t$95$1 * N[Power[N[(h / N[Power[l, 3.0], $MachinePrecision]), $MachinePrecision], 0.5], $MachinePrecision] + N[(N[Power[N[Exp[N[(N[Log[N[(l * h), $MachinePrecision]], $MachinePrecision] * -1.0), $MachinePrecision]], $MachinePrecision], 0.5], $MachinePrecision] * d), $MachinePrecision]), $MachinePrecision]]]]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := {\left(\ell \cdot h\right)}^{-1}\\
t_1 := -0.125 \cdot \frac{{\left(D \cdot M\right)}^{2}}{d}\\
t_2 := {t\_0}^{0.25}\\
\mathbf{if}\;h \leq -1.05 \cdot 10^{-297}:\\
\;\;\;\;\left(\left(-1 \cdot d\right) \cdot \left(t\_2 \cdot t\_2\right)\right) \cdot \left(1 - \left(\frac{1}{2} \cdot {\left(\frac{M \cdot D}{2 \cdot d}\right)}^{2}\right) \cdot \frac{h}{\ell}\right)\\
\mathbf{elif}\;h \leq 6.8 \cdot 10^{-149}:\\
\;\;\;\;{t\_0}^{0.5} \cdot d\\
\mathbf{elif}\;h \leq 2.8 \cdot 10^{+171}:\\
\;\;\;\;\frac{\mathsf{fma}\left(t\_1, {\left(\frac{h}{\ell}\right)}^{1.5}, {\left(\frac{h}{\ell}\right)}^{0.5} \cdot d\right)}{h}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(t\_1, {\left(\frac{h}{{\ell}^{3}}\right)}^{0.5}, {\left(e^{\log \left(\ell \cdot h\right) \cdot -1}\right)}^{0.5} \cdot d\right)\\
\end{array}
\end{array}
if h < -1.05000000000000007e-297Initial program 64.4%
lift-/.f64N/A
lift-pow.f64N/A
sqr-powN/A
lower-*.f64N/A
lift-/.f64N/A
metadata-evalN/A
metadata-evalN/A
lower-pow.f64N/A
lift-/.f64N/A
lift-/.f64N/A
metadata-evalN/A
metadata-evalN/A
lower-pow.f64N/A
lift-/.f6464.3
Applied rewrites64.3%
Taylor expanded in h around -inf
pow-prod-upN/A
metadata-evalN/A
metadata-evalN/A
metadata-evalN/A
pow1/2N/A
sqrt-divN/A
sqrt-pow2N/A
metadata-evalN/A
metadata-evalN/A
*-commutativeN/A
lift-*.f64N/A
pow1/2N/A
inv-powN/A
*-commutativeN/A
lift-pow.f64N/A
lift-*.f64N/A
metadata-evalN/A
lift-pow.f64N/A
lift-*.f64N/A
Applied rewrites70.8%
if -1.05000000000000007e-297 < h < 6.7999999999999998e-149Initial program 66.5%
Taylor expanded in d around inf
*-commutativeN/A
lower-*.f64N/A
pow1/2N/A
metadata-evalN/A
lift-/.f64N/A
lower-pow.f64N/A
inv-powN/A
lower-pow.f64N/A
*-commutativeN/A
lower-*.f6472.0
lift-/.f64N/A
metadata-eval72.0
Applied rewrites72.0%
if 6.7999999999999998e-149 < h < 2.80000000000000004e171Initial program 74.2%
Taylor expanded in h around 0
lower-/.f64N/A
Applied rewrites58.9%
lift-pow.f64N/A
lift-/.f64N/A
lift-pow.f64N/A
pow-powN/A
lower-pow.f64N/A
lift-/.f64N/A
metadata-eval74.0
Applied rewrites74.0%
if 2.80000000000000004e171 < h Initial program 58.1%
Taylor expanded in l around inf
associate-*r*N/A
lower-fma.f64N/A
Applied rewrites66.3%
lift-*.f64N/A
lift-pow.f64N/A
pow-to-expN/A
lower-exp.f64N/A
lower-*.f64N/A
lower-log.f64N/A
lift-*.f6466.3
Applied rewrites66.3%
(FPCore (d h l M D)
:precision binary64
(if (<= d -1.3e+158)
(* -1.0 (* d (/ 1.0 (pow (* l h) 0.5))))
(if (<= d 21000000000000.0)
(/
(fma
(* -0.125 (/ (pow (* D M) 2.0) d))
(pow (/ h l) 1.5)
(* (pow (/ h l) 0.5) d))
h)
(* (pow (pow (* l h) -1.0) 0.5) d))))
/* Herbie alternative branching on d (thresholds from Herbie's regime
   inference): large-magnitude-negative d uses the sign-factored
   1/sqrt(l*h) form; d <= 2.1e13 uses the fma-based Taylor form in h;
   large positive d drops the correction term entirely. */
double code(double d, double h, double l, double M, double D) {
double tmp;
if (d <= -1.3e+158) {
tmp = -1.0 * (d * (1.0 / pow((l * h), 0.5)));
} else if (d <= 21000000000000.0) {
tmp = fma((-0.125 * (pow((D * M), 2.0) / d)), pow((h / l), 1.5), (pow((h / l), 0.5) * d)) / h;
} else {
tmp = pow(pow((l * h), -1.0), 0.5) * d;
}
return tmp;
}
function code(d, h, l, M, D) tmp = 0.0 if (d <= -1.3e+158) tmp = Float64(-1.0 * Float64(d * Float64(1.0 / (Float64(l * h) ^ 0.5)))); elseif (d <= 21000000000000.0) tmp = Float64(fma(Float64(-0.125 * Float64((Float64(D * M) ^ 2.0) / d)), (Float64(h / l) ^ 1.5), Float64((Float64(h / l) ^ 0.5) * d)) / h); else tmp = Float64(((Float64(l * h) ^ -1.0) ^ 0.5) * d); end return tmp end
code[d_, h_, l_, M_, D_] := If[LessEqual[d, -1.3e+158], N[(-1.0 * N[(d * N[(1.0 / N[Power[N[(l * h), $MachinePrecision], 0.5], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[d, 21000000000000.0], N[(N[(N[(-0.125 * N[(N[Power[N[(D * M), $MachinePrecision], 2.0], $MachinePrecision] / d), $MachinePrecision]), $MachinePrecision] * N[Power[N[(h / l), $MachinePrecision], 1.5], $MachinePrecision] + N[(N[Power[N[(h / l), $MachinePrecision], 0.5], $MachinePrecision] * d), $MachinePrecision]), $MachinePrecision] / h), $MachinePrecision], N[(N[Power[N[Power[N[(l * h), $MachinePrecision], -1.0], $MachinePrecision], 0.5], $MachinePrecision] * d), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
\mathbf{if}\;d \leq -1.3 \cdot 10^{+158}:\\
\;\;\;\;-1 \cdot \left(d \cdot \frac{1}{{\left(\ell \cdot h\right)}^{0.5}}\right)\\
\mathbf{elif}\;d \leq 21000000000000:\\
\;\;\;\;\frac{\mathsf{fma}\left(-0.125 \cdot \frac{{\left(D \cdot M\right)}^{2}}{d}, {\left(\frac{h}{\ell}\right)}^{1.5}, {\left(\frac{h}{\ell}\right)}^{0.5} \cdot d\right)}{h}\\
\mathbf{else}:\\
\;\;\;\;{\left({\left(\ell \cdot h\right)}^{-1}\right)}^{0.5} \cdot d\\
\end{array}
\end{array}
if d < -1.3e158Initial program 74.7%
Taylor expanded in l around -inf
lower-*.f64N/A
sqrt-pow2N/A
metadata-evalN/A
metadata-evalN/A
*-commutativeN/A
lower-*.f64N/A
pow1/2N/A
metadata-evalN/A
lift-/.f64N/A
lower-pow.f64N/A
inv-powN/A
lower-pow.f64N/A
*-commutativeN/A
lower-*.f6473.5
lift-/.f64N/A
metadata-eval73.5
Applied rewrites73.5%
metadata-evalN/A
lift-pow.f64N/A
lift-*.f64N/A
lift-pow.f64N/A
*-commutativeN/A
inv-powN/A
metadata-evalN/A
pow1/2N/A
sqrt-divN/A
metadata-evalN/A
lower-/.f64N/A
pow1/2N/A
*-commutativeN/A
metadata-evalN/A
lower-pow.f64N/A
lift-*.f64N/A
metadata-eval73.5
Applied rewrites73.5%
if -1.3e158 < d < 2.1e13Initial program 58.8%
Taylor expanded in h around 0
lower-/.f64N/A
Applied rewrites58.1%
lift-pow.f64N/A
lift-/.f64N/A
lift-pow.f64N/A
pow-powN/A
lower-pow.f64N/A
lift-/.f64N/A
metadata-eval65.6
Applied rewrites65.6%
if 2.1e13 < d Initial program 81.7%
Taylor expanded in d around inf
*-commutativeN/A
lower-*.f64N/A
pow1/2N/A
metadata-evalN/A
lift-/.f64N/A
lower-pow.f64N/A
inv-powN/A
lower-pow.f64N/A
*-commutativeN/A
lower-*.f6463.4
lift-/.f64N/A
metadata-eval63.4
Applied rewrites63.4%
Final simplification66.3%
(FPCore (d h l M D)
:precision binary64
(let* ((t_0 (* -0.125 (/ (pow (* D M) 2.0) d))))
(if (<= l 1.08e-190)
(/ (fma t_0 (pow (/ h l) 1.5) (* (pow (/ h l) 0.5) d)) h)
(fma
t_0
(pow (/ h (pow l 3.0)) 0.5)
(* (pow (pow (* l h) -1.0) 0.5) d)))))
/* Herbie alternative branching on l (threshold from Herbie's regime
   inference): tiny l uses the fma-based Taylor form in h divided by h;
   otherwise an fma combining sqrt(h/l^3) with d/sqrt(l*h). */
double code(double d, double h, double l, double M, double D) {
double t_0 = -0.125 * (pow((D * M), 2.0) / d);
double tmp;
if (l <= 1.08e-190) {
tmp = fma(t_0, pow((h / l), 1.5), (pow((h / l), 0.5) * d)) / h;
} else {
tmp = fma(t_0, pow((h / pow(l, 3.0)), 0.5), (pow(pow((l * h), -1.0), 0.5) * d));
}
return tmp;
}
# Regime-split (on l) kernel; both branches combine -0.125*(D*M)^2/d with powers of h/l via fma.
function code(d, h, l, M, D) t_0 = Float64(-0.125 * Float64((Float64(D * M) ^ 2.0) / d)) tmp = 0.0 if (l <= 1.08e-190) tmp = Float64(fma(t_0, (Float64(h / l) ^ 1.5), Float64((Float64(h / l) ^ 0.5) * d)) / h); else tmp = fma(t_0, (Float64(h / (l ^ 3.0)) ^ 0.5), Float64(((Float64(l * h) ^ -1.0) ^ 0.5) * d)); end return tmp end
code[d_, h_, l_, M_, D_] := Block[{t$95$0 = N[(-0.125 * N[(N[Power[N[(D * M), $MachinePrecision], 2.0], $MachinePrecision] / d), $MachinePrecision]), $MachinePrecision]}, If[LessEqual[l, 1.08e-190], N[(N[(t$95$0 * N[Power[N[(h / l), $MachinePrecision], 1.5], $MachinePrecision] + N[(N[Power[N[(h / l), $MachinePrecision], 0.5], $MachinePrecision] * d), $MachinePrecision]), $MachinePrecision] / h), $MachinePrecision], N[(t$95$0 * N[Power[N[(h / N[Power[l, 3.0], $MachinePrecision]), $MachinePrecision], 0.5], $MachinePrecision] + N[(N[Power[N[Power[N[(l * h), $MachinePrecision], -1.0], $MachinePrecision], 0.5], $MachinePrecision] * d), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := -0.125 \cdot \frac{{\left(D \cdot M\right)}^{2}}{d}\\
\mathbf{if}\;\ell \leq 1.08 \cdot 10^{-190}:\\
\;\;\;\;\frac{\mathsf{fma}\left(t\_0, {\left(\frac{h}{\ell}\right)}^{1.5}, {\left(\frac{h}{\ell}\right)}^{0.5} \cdot d\right)}{h}\\
\mathbf{else}:\\
\;\;\;\;\mathsf{fma}\left(t\_0, {\left(\frac{h}{{\ell}^{3}}\right)}^{0.5}, {\left({\left(\ell \cdot h\right)}^{-1}\right)}^{0.5} \cdot d\right)\\
\end{array}
\end{array}
if l < 1.08e-190Initial program 64.9%
Taylor expanded in h around 0
lower-/.f64N/A
Applied rewrites53.9%
lift-pow.f64N/A
lift-/.f64N/A
lift-pow.f64N/A
pow-powN/A
lower-pow.f64N/A
lift-/.f64N/A
metadata-eval63.8
Applied rewrites63.8%
if 1.08e-190 < l Initial program 68.8%
Taylor expanded in l around inf
associate-*r*N/A
lower-fma.f64N/A
Applied rewrites66.3%
(FPCore (d h l M D)
:precision binary64
(let* ((t_0 (pow (pow (/ h l) 3.0) 0.25)))
(if (<= d -1.35e+54)
(* -1.0 (* d (/ 1.0 (pow (* l h) 0.5))))
(if (<= d 470000000000.0)
(/
(fma
(* -0.125 (/ (pow (* D M) 2.0) d))
(* t_0 t_0)
(* (pow (/ h l) 0.5) d))
h)
(* (pow (pow (* l h) -1.0) 0.5) d)))))
double code(double d, double h, double l, double M, double D) {
double t_0 = pow(pow((h / l), 3.0), 0.25);
double tmp;
if (d <= -1.35e+54) {
tmp = -1.0 * (d * (1.0 / pow((l * h), 0.5)));
} else if (d <= 470000000000.0) {
tmp = fma((-0.125 * (pow((D * M), 2.0) / d)), (t_0 * t_0), (pow((h / l), 0.5) * d)) / h;
} else {
tmp = pow(pow((l * h), -1.0), 0.5) * d;
}
return tmp;
}
# Three-regime (on d) kernel; middle regime uses t_0 = (h/l)^0.75 squared inside an fma.
function code(d, h, l, M, D) t_0 = (Float64(h / l) ^ 3.0) ^ 0.25 tmp = 0.0 if (d <= -1.35e+54) tmp = Float64(-1.0 * Float64(d * Float64(1.0 / (Float64(l * h) ^ 0.5)))); elseif (d <= 470000000000.0) tmp = Float64(fma(Float64(-0.125 * Float64((Float64(D * M) ^ 2.0) / d)), Float64(t_0 * t_0), Float64((Float64(h / l) ^ 0.5) * d)) / h); else tmp = Float64(((Float64(l * h) ^ -1.0) ^ 0.5) * d); end return tmp end
code[d_, h_, l_, M_, D_] := Block[{t$95$0 = N[Power[N[Power[N[(h / l), $MachinePrecision], 3.0], $MachinePrecision], 0.25], $MachinePrecision]}, If[LessEqual[d, -1.35e+54], N[(-1.0 * N[(d * N[(1.0 / N[Power[N[(l * h), $MachinePrecision], 0.5], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[d, 470000000000.0], N[(N[(N[(-0.125 * N[(N[Power[N[(D * M), $MachinePrecision], 2.0], $MachinePrecision] / d), $MachinePrecision]), $MachinePrecision] * N[(t$95$0 * t$95$0), $MachinePrecision] + N[(N[Power[N[(h / l), $MachinePrecision], 0.5], $MachinePrecision] * d), $MachinePrecision]), $MachinePrecision] / h), $MachinePrecision], N[(N[Power[N[Power[N[(l * h), $MachinePrecision], -1.0], $MachinePrecision], 0.5], $MachinePrecision] * d), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := {\left({\left(\frac{h}{\ell}\right)}^{3}\right)}^{0.25}\\
\mathbf{if}\;d \leq -1.35 \cdot 10^{+54}:\\
\;\;\;\;-1 \cdot \left(d \cdot \frac{1}{{\left(\ell \cdot h\right)}^{0.5}}\right)\\
\mathbf{elif}\;d \leq 470000000000:\\
\;\;\;\;\frac{\mathsf{fma}\left(-0.125 \cdot \frac{{\left(D \cdot M\right)}^{2}}{d}, t\_0 \cdot t\_0, {\left(\frac{h}{\ell}\right)}^{0.5} \cdot d\right)}{h}\\
\mathbf{else}:\\
\;\;\;\;{\left({\left(\ell \cdot h\right)}^{-1}\right)}^{0.5} \cdot d\\
\end{array}
\end{array}
if d < -1.35000000000000005e54Initial program 76.7%
Taylor expanded in l around -inf
lower-*.f64N/A
sqrt-pow2N/A
metadata-evalN/A
metadata-evalN/A
*-commutativeN/A
lower-*.f64N/A
pow1/2N/A
metadata-evalN/A
lift-/.f64N/A
lower-pow.f64N/A
inv-powN/A
lower-pow.f64N/A
*-commutativeN/A
lower-*.f6467.9
lift-/.f64N/A
metadata-eval67.9
Applied rewrites67.9%
metadata-evalN/A
lift-pow.f64N/A
lift-*.f64N/A
lift-pow.f64N/A
*-commutativeN/A
inv-powN/A
metadata-evalN/A
pow1/2N/A
sqrt-divN/A
metadata-evalN/A
lower-/.f64N/A
pow1/2N/A
*-commutativeN/A
metadata-evalN/A
lower-pow.f64N/A
lift-*.f64N/A
metadata-eval68.0
Applied rewrites68.0%
if -1.35000000000000005e54 < d < 4.7e11Initial program 55.1%
Taylor expanded in h around 0
lower-/.f64N/A
Applied rewrites58.8%
lift-pow.f64N/A
lift-/.f64N/A
lift-pow.f64N/A
sqr-powN/A
lower-*.f64N/A
metadata-evalN/A
lower-pow.f64N/A
lift-pow.f64N/A
lift-/.f64N/A
metadata-evalN/A
lower-pow.f64N/A
lift-pow.f64N/A
lift-/.f6458.8
Applied rewrites58.8%
if 4.7e11 < d Initial program 81.7%
Taylor expanded in d around inf
*-commutativeN/A
lower-*.f64N/A
pow1/2N/A
metadata-evalN/A
lift-/.f64N/A
lower-pow.f64N/A
inv-powN/A
lower-pow.f64N/A
*-commutativeN/A
lower-*.f6463.4
lift-/.f64N/A
metadata-eval63.4
Applied rewrites63.4%
Final simplification62.0%
(FPCore (d h l M D)
:precision binary64
(let* ((t_0 (pow (* M D) 1.0)))
(if (<= d -1.35e+54)
(* -1.0 (* d (/ 1.0 (pow (* l h) 0.5))))
(if (<= d 470000000000.0)
(/
(/
(fma
(* (pow (pow (/ h l) 3.0) 0.5) (* t_0 t_0))
-0.125
(* (pow (/ h l) 0.5) (* d d)))
d)
h)
(* (pow (pow (* l h) -1.0) 0.5) d)))))
double code(double d, double h, double l, double M, double D) {
double t_0 = pow((M * D), 1.0);
double tmp;
if (d <= -1.35e+54) {
tmp = -1.0 * (d * (1.0 / pow((l * h), 0.5)));
} else if (d <= 470000000000.0) {
tmp = (fma((pow(pow((h / l), 3.0), 0.5) * (t_0 * t_0)), -0.125, (pow((h / l), 0.5) * (d * d))) / d) / h;
} else {
tmp = pow(pow((l * h), -1.0), 0.5) * d;
}
return tmp;
}
# Three-regime (on d) kernel; middle regime divides the fma result by d, then by h.
function code(d, h, l, M, D) t_0 = Float64(M * D) ^ 1.0 tmp = 0.0 if (d <= -1.35e+54) tmp = Float64(-1.0 * Float64(d * Float64(1.0 / (Float64(l * h) ^ 0.5)))); elseif (d <= 470000000000.0) tmp = Float64(Float64(fma(Float64(((Float64(h / l) ^ 3.0) ^ 0.5) * Float64(t_0 * t_0)), -0.125, Float64((Float64(h / l) ^ 0.5) * Float64(d * d))) / d) / h); else tmp = Float64(((Float64(l * h) ^ -1.0) ^ 0.5) * d); end return tmp end
code[d_, h_, l_, M_, D_] := Block[{t$95$0 = N[Power[N[(M * D), $MachinePrecision], 1.0], $MachinePrecision]}, If[LessEqual[d, -1.35e+54], N[(-1.0 * N[(d * N[(1.0 / N[Power[N[(l * h), $MachinePrecision], 0.5], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[d, 470000000000.0], N[(N[(N[(N[(N[Power[N[Power[N[(h / l), $MachinePrecision], 3.0], $MachinePrecision], 0.5], $MachinePrecision] * N[(t$95$0 * t$95$0), $MachinePrecision]), $MachinePrecision] * -0.125 + N[(N[Power[N[(h / l), $MachinePrecision], 0.5], $MachinePrecision] * N[(d * d), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / d), $MachinePrecision] / h), $MachinePrecision], N[(N[Power[N[Power[N[(l * h), $MachinePrecision], -1.0], $MachinePrecision], 0.5], $MachinePrecision] * d), $MachinePrecision]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := {\left(M \cdot D\right)}^{1}\\
\mathbf{if}\;d \leq -1.35 \cdot 10^{+54}:\\
\;\;\;\;-1 \cdot \left(d \cdot \frac{1}{{\left(\ell \cdot h\right)}^{0.5}}\right)\\
\mathbf{elif}\;d \leq 470000000000:\\
\;\;\;\;\frac{\frac{\mathsf{fma}\left({\left({\left(\frac{h}{\ell}\right)}^{3}\right)}^{0.5} \cdot \left(t\_0 \cdot t\_0\right), -0.125, {\left(\frac{h}{\ell}\right)}^{0.5} \cdot \left(d \cdot d\right)\right)}{d}}{h}\\
\mathbf{else}:\\
\;\;\;\;{\left({\left(\ell \cdot h\right)}^{-1}\right)}^{0.5} \cdot d\\
\end{array}
\end{array}
if d < -1.35000000000000005e54Initial program 76.7%
Taylor expanded in l around -inf
lower-*.f64N/A
sqrt-pow2N/A
metadata-evalN/A
metadata-evalN/A
*-commutativeN/A
lower-*.f64N/A
pow1/2N/A
metadata-evalN/A
lift-/.f64N/A
lower-pow.f64N/A
inv-powN/A
lower-pow.f64N/A
*-commutativeN/A
lower-*.f6467.9
lift-/.f64N/A
metadata-eval67.9
Applied rewrites67.9%
metadata-evalN/A
lift-pow.f64N/A
lift-*.f64N/A
lift-pow.f64N/A
*-commutativeN/A
inv-powN/A
metadata-evalN/A
pow1/2N/A
sqrt-divN/A
metadata-evalN/A
lower-/.f64N/A
pow1/2N/A
*-commutativeN/A
metadata-evalN/A
lower-pow.f64N/A
lift-*.f64N/A
metadata-eval68.0
Applied rewrites68.0%
if -1.35000000000000005e54 < d < 4.7e11Initial program 55.1%
Taylor expanded in h around 0
lower-/.f64N/A
Applied rewrites58.8%
Taylor expanded in d around 0
lower-/.f64N/A
Applied rewrites56.5%
if 4.7e11 < d Initial program 81.7%
Taylor expanded in d around inf
*-commutativeN/A
lower-*.f64N/A
pow1/2N/A
metadata-evalN/A
lift-/.f64N/A
lower-pow.f64N/A
inv-powN/A
lower-pow.f64N/A
*-commutativeN/A
lower-*.f6463.4
lift-/.f64N/A
metadata-eval63.4
Applied rewrites63.4%
Final simplification60.8%
(FPCore (d h l M D)
:precision binary64
(let* ((t_0 (/ (* d (* (pow (/ h l) 0.5) -1.0)) h)))
(if (<= h -1.9e+259)
t_0
(if (<= h -4e-309)
(* -1.0 (* d (/ 1.0 (pow (* l h) 0.5))))
(if (<= h 5.5e+130) (* (pow (pow (* l h) -1.0) 0.5) d) t_0)))))
double code(double d, double h, double l, double M, double D) {
double t_0 = (d * (pow((h / l), 0.5) * -1.0)) / h;
double tmp;
if (h <= -1.9e+259) {
tmp = t_0;
} else if (h <= -4e-309) {
tmp = -1.0 * (d * (1.0 / pow((l * h), 0.5)));
} else if (h <= 5.5e+130) {
tmp = pow(pow((l * h), -1.0), 0.5) * d;
} else {
tmp = t_0;
}
return tmp;
}
! NaN-aware max/min wrappers used by the generated kernels below.
! Semantics follow C's fmax/fmin: if exactly one argument is NaN, the
! other argument is returned (x /= x is .true. only when x is NaN).
! Mixed-kind variants promote the real(4) argument with dble().
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax, real(8)/real(8).
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax, real(4)/real(4).
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax, real(8)/real(4): result promoted to real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! fmax, real(4)/real(8): result promoted to real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin, real(8)/real(8).
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin, real(4)/real(4).
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin, real(8)/real(4): result promoted to real(8).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! fmin, real(4)/real(8): result promoted to real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Regime-split (on h) evaluation; the two extreme-h regimes share the
! fallback form (d * -sqrt(h/l)) / h.  Arguments m and d_1 are unused here.
real(8) function code(d, h, l, m, d_1)
use fmin_fmax_functions
real(8), intent (in) :: d
real(8), intent (in) :: h
real(8), intent (in) :: l
real(8), intent (in) :: m
real(8), intent (in) :: d_1
real(8) :: fallback
fallback = (d * (((h / l) ** 0.5d0) * (-1.0d0))) / h
code = fallback
if (h > (-1.9d+259)) then
  if (h <= (-4d-309)) then
    code = (-1.0d0) * (d * (1.0d0 / ((l * h) ** 0.5d0)))
  else if (h <= 5.5d+130) then
    code = (((l * h) ** (-1.0d0)) ** 0.5d0) * d
  end if
end if
end function
/**
 * Regime-split (on h) evaluation of the rewritten kernel; both extreme-h
 * regimes share the fallback form (d * -sqrt(h/l)) / h.
 */
public static double code(double d, double h, double l, double M, double D) {
    final double fallback = (d * (Math.pow((h / l), 0.5) * -1.0)) / h;
    double out = fallback;
    if (h > -1.9e+259) {
        if (h <= -4e-309) {
            out = -1.0 * (d * (1.0 / Math.pow((l * h), 0.5)));
        } else if (h <= 5.5e+130) {
            out = Math.pow(Math.pow((l * h), -1.0), 0.5) * d;
        }
    }
    return out;
}
def code(d, h, l, M, D):
    """Regime-split (on h) kernel; extreme-h regimes share -d*sqrt(h/l)/h."""
    edge = (d * (math.pow((h / l), 0.5) * -1.0)) / h
    result = edge
    if h > -1.9e+259:
        if h <= -4e-309:
            result = -1.0 * (d * (1.0 / math.pow((l * h), 0.5)))
        elif h <= 5.5e+130:
            result = math.pow(math.pow((l * h), -1.0), 0.5) * d
    return result
# Regime-split (on h) kernel; both extreme-h branches reuse t_0 = -d*sqrt(h/l)/h.
function code(d, h, l, M, D) t_0 = Float64(Float64(d * Float64((Float64(h / l) ^ 0.5) * -1.0)) / h) tmp = 0.0 if (h <= -1.9e+259) tmp = t_0; elseif (h <= -4e-309) tmp = Float64(-1.0 * Float64(d * Float64(1.0 / (Float64(l * h) ^ 0.5)))); elseif (h <= 5.5e+130) tmp = Float64(((Float64(l * h) ^ -1.0) ^ 0.5) * d); else tmp = t_0; end return tmp end
% Regime-split (on h) kernel; both extreme-h branches reuse t_0 = -d*sqrt(h/l)/h.
function tmp_2 = code(d, h, l, M, D) t_0 = (d * (((h / l) ^ 0.5) * -1.0)) / h; tmp = 0.0; if (h <= -1.9e+259) tmp = t_0; elseif (h <= -4e-309) tmp = -1.0 * (d * (1.0 / ((l * h) ^ 0.5))); elseif (h <= 5.5e+130) tmp = (((l * h) ^ -1.0) ^ 0.5) * d; else tmp = t_0; end tmp_2 = tmp; end
code[d_, h_, l_, M_, D_] := Block[{t$95$0 = N[(N[(d * N[(N[Power[N[(h / l), $MachinePrecision], 0.5], $MachinePrecision] * -1.0), $MachinePrecision]), $MachinePrecision] / h), $MachinePrecision]}, If[LessEqual[h, -1.9e+259], t$95$0, If[LessEqual[h, -4e-309], N[(-1.0 * N[(d * N[(1.0 / N[Power[N[(l * h), $MachinePrecision], 0.5], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], If[LessEqual[h, 5.5e+130], N[(N[Power[N[Power[N[(l * h), $MachinePrecision], -1.0], $MachinePrecision], 0.5], $MachinePrecision] * d), $MachinePrecision], t$95$0]]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := \frac{d \cdot \left({\left(\frac{h}{\ell}\right)}^{0.5} \cdot -1\right)}{h}\\
\mathbf{if}\;h \leq -1.9 \cdot 10^{+259}:\\
\;\;\;\;t\_0\\
\mathbf{elif}\;h \leq -4 \cdot 10^{-309}:\\
\;\;\;\;-1 \cdot \left(d \cdot \frac{1}{{\left(\ell \cdot h\right)}^{0.5}}\right)\\
\mathbf{elif}\;h \leq 5.5 \cdot 10^{+130}:\\
\;\;\;\;{\left({\left(\ell \cdot h\right)}^{-1}\right)}^{0.5} \cdot d\\
\mathbf{else}:\\
\;\;\;\;t\_0\\
\end{array}
\end{array}
if h < -1.9e259 or 5.4999999999999997e130 < h Initial program 65.7%
Taylor expanded in h around 0
lower-/.f64N/A
Applied rewrites33.3%
Taylor expanded in l around -inf
sqrt-pow2N/A
metadata-evalN/A
metadata-evalN/A
associate-*l*N/A
lower-*.f64N/A
*-commutativeN/A
lower-*.f64N/A
pow1/2N/A
lift-pow.f64N/A
lift-/.f6441.2
Applied rewrites41.2%
if -1.9e259 < h < -3.9999999999999977e-309Initial program 62.7%
Taylor expanded in l around -inf
lower-*.f64N/A
sqrt-pow2N/A
metadata-evalN/A
metadata-evalN/A
*-commutativeN/A
lower-*.f64N/A
pow1/2N/A
metadata-evalN/A
lift-/.f64N/A
lower-pow.f64N/A
inv-powN/A
lower-pow.f64N/A
*-commutativeN/A
lower-*.f6448.3
lift-/.f64N/A
metadata-eval48.3
Applied rewrites48.3%
metadata-evalN/A
lift-pow.f64N/A
lift-*.f64N/A
lift-pow.f64N/A
*-commutativeN/A
inv-powN/A
metadata-evalN/A
pow1/2N/A
sqrt-divN/A
metadata-evalN/A
lower-/.f64N/A
pow1/2N/A
*-commutativeN/A
metadata-evalN/A
lower-pow.f64N/A
lift-*.f64N/A
metadata-eval48.3
Applied rewrites48.3%
if -3.9999999999999977e-309 < h < 5.4999999999999997e130Initial program 72.2%
Taylor expanded in d around inf
*-commutativeN/A
lower-*.f64N/A
pow1/2N/A
metadata-evalN/A
lift-/.f64N/A
lower-pow.f64N/A
inv-powN/A
lower-pow.f64N/A
*-commutativeN/A
lower-*.f6452.0
lift-/.f64N/A
metadata-eval52.0
Applied rewrites52.0%
Final simplification48.3%
; d * ((l*h)^-1)^0.5, negated when d <= 2.1e-228.
(FPCore (d h l M D) :precision binary64 (let* ((t_0 (pow (pow (* l h) -1.0) 0.5))) (if (<= d 2.1e-228) (* -1.0 (* d t_0)) (* t_0 d))))
double code(double d, double h, double l, double M, double D) {
double t_0 = pow(pow((l * h), -1.0), 0.5);
double tmp;
if (d <= 2.1e-228) {
tmp = -1.0 * (d * t_0);
} else {
tmp = t_0 * d;
}
return tmp;
}
! NaN-aware max/min wrappers used by the generated kernels below.
! Semantics follow C's fmax/fmin: if exactly one argument is NaN, the
! other argument is returned (x /= x is .true. only when x is NaN).
! Mixed-kind variants promote the real(4) argument with dble().
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax, real(8)/real(8).
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax, real(4)/real(4).
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax, real(8)/real(4): result promoted to real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! fmax, real(4)/real(8): result promoted to real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin, real(8)/real(8).
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin, real(4)/real(4).
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin, real(8)/real(4): result promoted to real(8).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! fmin, real(4)/real(8): result promoted to real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Scales d by ((l*h)**(-1))**0.5; the product is negated for d <= 2.1d-228.
! Arguments m and d_1 are unused here.
real(8) function code(d, h, l, m, d_1)
use fmin_fmax_functions
real(8), intent (in) :: d
real(8), intent (in) :: h
real(8), intent (in) :: l
real(8), intent (in) :: m
real(8), intent (in) :: d_1
real(8) :: inv_sqrt_lh
inv_sqrt_lh = ((l * h) ** (-1.0d0)) ** 0.5d0
if (d <= 2.1d-228) then
  code = (-1.0d0) * (d * inv_sqrt_lh)
else
  code = inv_sqrt_lh * d
end if
end function
/** Scales d by ((l*h)^-1)^0.5; the product is negated for d <= 2.1e-228. */
public static double code(double d, double h, double l, double M, double D) {
    final double invSqrt = Math.pow(Math.pow((l * h), -1.0), 0.5);
    return (d <= 2.1e-228) ? (-1.0 * (d * invSqrt)) : (invSqrt * d);
}
def code(d, h, l, M, D):
    """Scale d by ((l*h)^-1)^0.5; negate the product for d <= 2.1e-228."""
    inv_sqrt = math.pow(math.pow((l * h), -1.0), 0.5)
    if d <= 2.1e-228:
        return -1.0 * (d * inv_sqrt)
    return inv_sqrt * d
# Scales d by ((l*h)^-1)^0.5; negates the product when d <= 2.1e-228.
function code(d, h, l, M, D) t_0 = (Float64(l * h) ^ -1.0) ^ 0.5 tmp = 0.0 if (d <= 2.1e-228) tmp = Float64(-1.0 * Float64(d * t_0)); else tmp = Float64(t_0 * d); end return tmp end
% Scales d by ((l*h)^-1)^0.5; negates the product when d <= 2.1e-228.
function tmp_2 = code(d, h, l, M, D) t_0 = ((l * h) ^ -1.0) ^ 0.5; tmp = 0.0; if (d <= 2.1e-228) tmp = -1.0 * (d * t_0); else tmp = t_0 * d; end tmp_2 = tmp; end
code[d_, h_, l_, M_, D_] := Block[{t$95$0 = N[Power[N[Power[N[(l * h), $MachinePrecision], -1.0], $MachinePrecision], 0.5], $MachinePrecision]}, If[LessEqual[d, 2.1e-228], N[(-1.0 * N[(d * t$95$0), $MachinePrecision]), $MachinePrecision], N[(t$95$0 * d), $MachinePrecision]]]
\begin{array}{l}
\\
\begin{array}{l}
t_0 := {\left({\left(\ell \cdot h\right)}^{-1}\right)}^{0.5}\\
\mathbf{if}\;d \leq 2.1 \cdot 10^{-228}:\\
\;\;\;\;-1 \cdot \left(d \cdot t\_0\right)\\
\mathbf{else}:\\
\;\;\;\;t\_0 \cdot d\\
\end{array}
\end{array}
if d < 2.09999999999999991e-228Initial program 60.8%
Taylor expanded in l around -inf
lower-*.f64N/A
sqrt-pow2N/A
metadata-evalN/A
metadata-evalN/A
*-commutativeN/A
lower-*.f64N/A
pow1/2N/A
metadata-evalN/A
lift-/.f64N/A
lower-pow.f64N/A
inv-powN/A
lower-pow.f64N/A
*-commutativeN/A
lower-*.f6443.3
lift-/.f64N/A
metadata-eval43.3
Applied rewrites43.3%
if 2.09999999999999991e-228 < d Initial program 75.3%
Taylor expanded in d around inf
*-commutativeN/A
lower-*.f64N/A
pow1/2N/A
metadata-evalN/A
lift-/.f64N/A
lower-pow.f64N/A
inv-powN/A
lower-pow.f64N/A
*-commutativeN/A
lower-*.f6446.4
lift-/.f64N/A
metadata-eval46.4
Applied rewrites46.4%
Final simplification44.5%
; -d * ((l*h)^-1)^0.5
(FPCore (d h l M D) :precision binary64 (* -1.0 (* d (pow (pow (* l h) -1.0) 0.5))))
double code(double d, double h, double l, double M, double D) {
return -1.0 * (d * pow(pow((l * h), -1.0), 0.5));
}
! NaN-aware max/min wrappers used by the generated kernels below.
! Semantics follow C's fmax/fmin: if exactly one argument is NaN, the
! other argument is returned (x /= x is .true. only when x is NaN).
! Mixed-kind variants promote the real(4) argument with dble().
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax, real(8)/real(8).
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax, real(4)/real(4).
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax, real(8)/real(4): result promoted to real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! fmax, real(4)/real(8): result promoted to real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin, real(8)/real(8).
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin, real(4)/real(4).
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin, real(8)/real(4): result promoted to real(8).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! fmin, real(4)/real(8): result promoted to real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Computes (-1) * d * ((l*h)**(-1))**0.5.  Arguments m and d_1 are unused.
real(8) function code(d, h, l, m, d_1)
use fmin_fmax_functions
real(8), intent (in) :: d
real(8), intent (in) :: h
real(8), intent (in) :: l
real(8), intent (in) :: m
real(8), intent (in) :: d_1
real(8) :: inv_sqrt_lh
inv_sqrt_lh = ((l * h) ** (-1.0d0)) ** 0.5d0
code = (-1.0d0) * (d * inv_sqrt_lh)
end function
/** Computes -d * ((l*h)^-1)^0.5. */
public static double code(double d, double h, double l, double M, double D) {
    final double invSqrt = Math.pow(Math.pow((l * h), -1.0), 0.5);
    return -1.0 * (d * invSqrt);
}
def code(d, h, l, M, D):
    """Compute -d * ((l*h)^-1)^0.5."""
    reciprocal = math.pow((l * h), -1.0)
    return -1.0 * (d * math.pow(reciprocal, 0.5))
# Computes -d * ((l*h)^-1)^0.5.
function code(d, h, l, M, D) return Float64(-1.0 * Float64(d * ((Float64(l * h) ^ -1.0) ^ 0.5))) end
% Computes -d * ((l*h)^-1)^0.5.
function tmp = code(d, h, l, M, D) tmp = -1.0 * (d * (((l * h) ^ -1.0) ^ 0.5)); end
code[d_, h_, l_, M_, D_] := N[(-1.0 * N[(d * N[Power[N[Power[N[(l * h), $MachinePrecision], -1.0], $MachinePrecision], 0.5], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-1 \cdot \left(d \cdot {\left({\left(\ell \cdot h\right)}^{-1}\right)}^{0.5}\right)
\end{array}
Initial program 66.4%
Taylor expanded in l around -inf
lower-*.f64N/A
sqrt-pow2N/A
metadata-evalN/A
metadata-evalN/A
*-commutativeN/A
lower-*.f64N/A
pow1/2N/A
metadata-evalN/A
lift-/.f64N/A
lower-pow.f64N/A
inv-powN/A
lower-pow.f64N/A
*-commutativeN/A
lower-*.f6428.8
lift-/.f64N/A
metadata-eval28.8
Applied rewrites28.8%
Final simplification28.8%
; -d * (1 / (l*h)^0.5)
(FPCore (d h l M D) :precision binary64 (* -1.0 (* d (/ 1.0 (pow (* l h) 0.5)))))
double code(double d, double h, double l, double M, double D) {
return -1.0 * (d * (1.0 / pow((l * h), 0.5)));
}
! NaN-aware max/min wrappers used by the generated kernels below.
! Semantics follow C's fmax/fmin: if exactly one argument is NaN, the
! other argument is returned (x /= x is .true. only when x is NaN).
! Mixed-kind variants promote the real(4) argument with dble().
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax, real(8)/real(8).
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax, real(4)/real(4).
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax, real(8)/real(4): result promoted to real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! fmax, real(4)/real(8): result promoted to real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin, real(8)/real(8).
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin, real(4)/real(4).
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin, real(8)/real(4): result promoted to real(8).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! fmin, real(4)/real(8): result promoted to real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Computes (-1) * d * (1 / (l*h)**0.5).  Arguments m and d_1 are unused.
real(8) function code(d, h, l, m, d_1)
use fmin_fmax_functions
real(8), intent (in) :: d
real(8), intent (in) :: h
real(8), intent (in) :: l
real(8), intent (in) :: m
real(8), intent (in) :: d_1
real(8) :: sqrt_lh
sqrt_lh = (l * h) ** 0.5d0
code = (-1.0d0) * (d * (1.0d0 / sqrt_lh))
end function
/** Computes -d * (1 / (l*h)^0.5). */
public static double code(double d, double h, double l, double M, double D) {
    final double root = Math.pow((l * h), 0.5);
    return -1.0 * (d * (1.0 / root));
}
def code(d, h, l, M, D):
    """Compute -d * (1 / (l*h)^0.5)."""
    root = math.pow((l * h), 0.5)
    return -1.0 * (d * (1.0 / root))
# Computes -d * (1 / (l*h)^0.5).
function code(d, h, l, M, D) return Float64(-1.0 * Float64(d * Float64(1.0 / (Float64(l * h) ^ 0.5)))) end
% Computes -d * (1 / (l*h)^0.5).
function tmp = code(d, h, l, M, D) tmp = -1.0 * (d * (1.0 / ((l * h) ^ 0.5))); end
code[d_, h_, l_, M_, D_] := N[(-1.0 * N[(d * N[(1.0 / N[Power[N[(l * h), $MachinePrecision], 0.5], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
-1 \cdot \left(d \cdot \frac{1}{{\left(\ell \cdot h\right)}^{0.5}}\right)
\end{array}
Initial program 66.4%
Taylor expanded in l around -inf
lower-*.f64N/A
sqrt-pow2N/A
metadata-evalN/A
metadata-evalN/A
*-commutativeN/A
lower-*.f64N/A
pow1/2N/A
metadata-evalN/A
lift-/.f64N/A
lower-pow.f64N/A
inv-powN/A
lower-pow.f64N/A
*-commutativeN/A
lower-*.f6428.8
lift-/.f64N/A
metadata-eval28.8
Applied rewrites28.8%
metadata-evalN/A
lift-pow.f64N/A
lift-*.f64N/A
lift-pow.f64N/A
*-commutativeN/A
inv-powN/A
metadata-evalN/A
pow1/2N/A
sqrt-divN/A
metadata-evalN/A
lower-/.f64N/A
pow1/2N/A
*-commutativeN/A
metadata-evalN/A
lower-pow.f64N/A
lift-*.f64N/A
metadata-eval28.4
Applied rewrites28.4%
Final simplification28.4%
; (-d) * exp(0.5 * log((l*h)^-1)) — exp/log spelling of the same product.
(FPCore (d h l M D) :precision binary64 (* (* -1.0 d) (exp (* (log (pow (* l h) -1.0)) 0.5))))
double code(double d, double h, double l, double M, double D) {
return (-1.0 * d) * exp((log(pow((l * h), -1.0)) * 0.5));
}
! NaN-aware max/min wrappers used by the generated kernels below.
! Semantics follow C's fmax/fmin: if exactly one argument is NaN, the
! other argument is returned (x /= x is .true. only when x is NaN).
! Mixed-kind variants promote the real(4) argument with dble().
module fmin_fmax_functions
implicit none
private
public fmax
public fmin
interface fmax
module procedure fmax88
module procedure fmax44
module procedure fmax84
module procedure fmax48
end interface
interface fmin
module procedure fmin88
module procedure fmin44
module procedure fmin84
module procedure fmin48
end interface
contains
! fmax, real(8)/real(8).
real(8) function fmax88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax, real(4)/real(4).
real(4) function fmax44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, max(x, y), y /= y), x /= x)
end function
! fmax, real(8)/real(4): result promoted to real(8).
real(8) function fmax84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, max(x, dble(y)), y /= y), x /= x)
end function
! fmax, real(4)/real(8): result promoted to real(8).
real(8) function fmax48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), max(dble(x), y), y /= y), x /= x)
end function
! fmin, real(8)/real(8).
real(8) function fmin88(x, y) result (res)
real(8), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin, real(4)/real(4).
real(4) function fmin44(x, y) result (res)
real(4), intent (in) :: x
real(4), intent (in) :: y
res = merge(y, merge(x, min(x, y), y /= y), x /= x)
end function
! fmin, real(8)/real(4): result promoted to real(8).
real(8) function fmin84(x, y) result(res)
real(8), intent (in) :: x
real(4), intent (in) :: y
res = merge(dble(y), merge(x, min(x, dble(y)), y /= y), x /= x)
end function
! fmin, real(4)/real(8): result promoted to real(8).
real(8) function fmin48(x, y) result(res)
real(4), intent (in) :: x
real(8), intent (in) :: y
res = merge(y, merge(dble(x), min(dble(x), y), y /= y), x /= x)
end function
end module
! Computes (-d) * exp(0.5 * log((l*h)**(-1))).  Arguments m and d_1 are unused.
real(8) function code(d, h, l, m, d_1)
use fmin_fmax_functions
real(8), intent (in) :: d
real(8), intent (in) :: h
real(8), intent (in) :: l
real(8), intent (in) :: m
real(8), intent (in) :: d_1
real(8) :: half_log
half_log = log(((l * h) ** (-1.0d0))) * 0.5d0
code = ((-1.0d0) * d) * exp(half_log)
end function
/** Computes (-d) * exp(0.5 * log((l*h)^-1)). */
public static double code(double d, double h, double l, double M, double D) {
    final double halfLog = Math.log(Math.pow((l * h), -1.0)) * 0.5;
    return (-1.0 * d) * Math.exp(halfLog);
}
def code(d, h, l, M, D):
    """Compute (-d) * exp(0.5 * log((l*h)^-1))."""
    half_log = math.log(math.pow((l * h), -1.0)) * 0.5
    return (-1.0 * d) * math.exp(half_log)
# Computes (-d) * exp(0.5 * log((l*h)^-1)).
function code(d, h, l, M, D) return Float64(Float64(-1.0 * d) * exp(Float64(log((Float64(l * h) ^ -1.0)) * 0.5))) end
% Computes (-d) * exp(0.5 * log((l*h)^-1)).
function tmp = code(d, h, l, M, D) tmp = (-1.0 * d) * exp((log(((l * h) ^ -1.0)) * 0.5)); end
code[d_, h_, l_, M_, D_] := N[(N[(-1.0 * d), $MachinePrecision] * N[Exp[N[(N[Log[N[Power[N[(l * h), $MachinePrecision], -1.0], $MachinePrecision]], $MachinePrecision] * 0.5), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}
\\
\left(-1 \cdot d\right) \cdot e^{\log \left({\left(\ell \cdot h\right)}^{-1}\right) \cdot 0.5}
\end{array}
Initial program 66.4%
Taylor expanded in l around -inf
lower-*.f64N/A
sqrt-pow2N/A
metadata-evalN/A
metadata-evalN/A
*-commutativeN/A
lower-*.f64N/A
pow1/2N/A
metadata-evalN/A
lift-/.f64N/A
lower-pow.f64N/A
inv-powN/A
lower-pow.f64N/A
*-commutativeN/A
lower-*.f6428.8
lift-/.f64N/A
metadata-eval28.8
Applied rewrites28.8%
metadata-evalN/A
lift-pow.f64N/A
lift-*.f64N/A
lift-pow.f64N/A
pow-to-expN/A
lower-exp.f64N/A
lower-*.f64N/A
lower-log.f64N/A
lift-pow.f64N/A
lift-*.f64N/A
metadata-eval27.6
Applied rewrites27.6%
herbie shell --seed 2025064
; Original input expression: sqrt(d/h) * sqrt(d/l) * (1 - (1/2)*((M*D)/(2*d))^2 * (h/l)).
(FPCore (d h l M D)
:name "Henrywood and Agarwal, Equation (12)"
:precision binary64
(* (* (pow (/ d h) (/ 1.0 2.0)) (pow (/ d l) (/ 1.0 2.0))) (- 1.0 (* (* (/ 1.0 2.0) (pow (/ (* M D) (* 2.0 d)) 2.0)) (/ h l)))))