Average Error: 0.3 → 0.3
Time: 6.0s
Precision: binary64
Cost: 6592
\[x \cdot \log x \]
\[x \cdot \log x \]
;; x * log(x), evaluated in IEEE binary64
(FPCore (x) :precision binary64 (* x (log x)))
;; x * log(x), evaluated in IEEE binary64
(FPCore (x) :precision binary64 (* x (log x)))
double code(double x) {
	return x * log(x);
}
double code(double x) {
	return x * log(x);
}
real(8) function code(x)
    ! Evaluate x * log(x) in double precision.
    real(8), intent (in) :: x
    real(8) :: log_x
    log_x = log(x)
    code = x * log_x
end function
real(8) function code(x)
    ! Evaluate x * log(x) in double precision.
    real(8), intent (in) :: x
    real(8) :: log_x
    log_x = log(x)
    code = x * log_x
end function
/** Evaluates x * ln(x) in double precision. */
public static double code(double x) {
	final double logX = Math.log(x);
	return x * logX;
}
/** Evaluates x * ln(x) in double precision. */
public static double code(double x) {
	final double logX = Math.log(x);
	return x * logX;
}
def code(x):
	"""Return x * ln(x) (math.log raises ValueError for x <= 0)."""
	log_x = math.log(x)
	return x * log_x
def code(x):
	"""Return x * ln(x) (math.log raises ValueError for x <= 0)."""
	log_x = math.log(x)
	return x * log_x
# Evaluate x * log(x), with the result rounded to Float64.
function code(x)
	log_x = log(x)
	return Float64(x * log_x)
end
# Evaluate x * log(x), with the result rounded to Float64.
function code(x)
	log_x = log(x)
	return Float64(x * log_x)
end
function tmp = code(x)
	% Evaluate x * log(x).
	log_x = log(x);
	tmp = x * log_x;
end
function tmp = code(x)
	% Evaluate x * log(x).
	log_x = log(x);
	tmp = x * log_x;
end
(* Evaluate x * Log[x], numericized at $MachinePrecision. *)
code[x_] := N[x * N[Log[x], $MachinePrecision], $MachinePrecision]
(* Evaluate x * Log[x], numericized at $MachinePrecision. *)
code[x_] := N[x * N[Log[x], $MachinePrecision], $MachinePrecision]
x \cdot \log x
x \cdot \log x

Error

Try it out

Your Program's Arguments

Results

Enter valid numbers for all inputs

Derivation

  1. Initial program 0.3

    \[x \cdot \log x \]
  2. Final simplification 0.3

    \[\leadsto x \cdot \log x \]

Reproduce

herbie shell --seed 2022291 
;; Input program: x * log(x) in binary64, taken from
;; Statistics.Distribution.Binomial.directEntropy (math-functions-0.1.5.2)
(FPCore (x)
  :name "Statistics.Distribution.Binomial:directEntropy from math-functions-0.1.5.2"
  :precision binary64
  (* x (log x)))