; Initial program: exp(x) - 1 evaluated directly. The subtraction
; cancels when exp(x) is near 1 (small |x|); report accuracy: 34.5%.
(FPCore (x) :precision binary64 (- (exp x) 1.0))
/* Direct translation of exp(x) - 1; cancellation-prone for small |x|. */
double code(double x) { return exp(x) - 1.0; }
! Direct translation of exp(x) - 1; cancellation-prone for small |x|.
! (Restored to valid multi-line free-form layout; the report dump had
! collapsed the statements onto one line.)
real(8) function code(x)
    real(8), intent (in) :: x
    code = exp(x) - 1.0d0
end function
// Direct translation of exp(x) - 1; cancellation-prone for small |x|.
public static double code(double x) { return Math.exp(x) - 1.0; }
# Direct translation of exp(x) - 1; cancellation-prone for small |x|.
def code(x): return math.exp(x) - 1.0
# Direct translation of exp(x) - 1; cancellation-prone for small |x|.
function code(x) return Float64(exp(x) - 1.0) end
% Direct translation of exp(x) - 1; cancellation-prone for small |x|.
% (Restored to valid multi-line form; the report dump had collapsed the
% function onto one line.)
function tmp = code(x)
    tmp = exp(x) - 1.0;
end
(* Direct translation of exp(x) - 1 with machine-precision rounding at each step. *)
code[x_] := N[(N[Exp[x], $MachinePrecision] - 1.0), $MachinePrecision]
\begin{array}{l} e^{x} - 1 \end{array}
Sampling outcomes in binary64 precision:
Herbie found 5 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; First alternative listed in the table: same expression as the
; initial program (- (exp x) 1.0).
(FPCore (x) :precision binary64 (- (exp x) 1.0))
/* Same as the initial program: exp(x) - 1 computed directly. */
double code(double x) { return exp(x) - 1.0; }
! Same as the initial program: exp(x) - 1 computed directly.
! (Restored to valid multi-line free-form layout; the report dump had
! collapsed the statements onto one line.)
real(8) function code(x)
    real(8), intent (in) :: x
    code = exp(x) - 1.0d0
end function
// Same as the initial program: exp(x) - 1 computed directly.
public static double code(double x) { return Math.exp(x) - 1.0; }
# Same as the initial program: exp(x) - 1 computed directly.
def code(x): return math.exp(x) - 1.0
# Same as the initial program: exp(x) - 1 computed directly.
function code(x) return Float64(exp(x) - 1.0) end
% Same as the initial program: exp(x) - 1 computed directly.
% (Restored to valid multi-line form; the report dump had collapsed the
% function onto one line.)
function tmp = code(x)
    tmp = exp(x) - 1.0;
end
(* Same as the initial program: exp(x) - 1 with machine-precision rounding. *)
code[x_] := N[(N[Exp[x], $MachinePrecision] - 1.0), $MachinePrecision]
\begin{array}{l} e^{x} - 1 \end{array}
; Alternative: the fused expm1 primitive computes exp(x) - 1 without
; the cancelling subtraction; report accuracy: 100.0%.
(FPCore (x) :precision binary64 (expm1 x))
/* expm1 computes exp(x) - 1 accurately even for small |x|. */
double code(double x) { return expm1(x); }
// Math.expm1 computes exp(x) - 1 accurately even for small |x|.
public static double code(double x) { return Math.expm1(x); }
# math.expm1 computes exp(x) - 1 accurately even for small |x|.
def code(x): return math.expm1(x)
# expm1 computes exp(x) - 1 accurately even for small |x|.
function code(x) return expm1(x) end
(* NOTE(review): no expm1 equivalent used here; Exp[x] - 1 is evaluated
   as a whole and then numericized, unlike the other languages' fused
   expm1 call -- confirm this is the intended translation. *)
code[x_] := N[(Exp[x] - 1), $MachinePrecision]
\begin{array}{l} \mathsf{expm1}\left(x\right) \end{array}
Initial program 34.5%
lift--.f64
N/A
lift-exp.f64
N/A
lower-expm1.f64
100.0
Applied rewrites: 100.0%
; Alternative: degree-3 Taylor polynomial of exp(x) - 1 around 0,
; x*(1 + x/2 + x^2/6), evaluated with nested fma; report accuracy: 71.6%.
(FPCore (x) :precision binary64 (* (fma (fma 0.16666666666666666 x 0.5) x 1.0) x))
/* Degree-3 Taylor polynomial of exp(x) - 1: x*(1 + x/2 + x^2/6), via nested fma. */
double code(double x) { return fma(fma(0.16666666666666666, x, 0.5), x, 1.0) * x; }
# Degree-3 Taylor polynomial of exp(x) - 1: x*(1 + x/2 + x^2/6), via nested fma.
function code(x) return Float64(fma(fma(0.16666666666666666, x, 0.5), x, 1.0) * x) end
(* Degree-3 Taylor polynomial of exp(x) - 1 in Horner form:
   ((x/6 + 1/2)*x + 1)*x, rounded to machine precision at each step. *)
code[x_] := N[(N[(N[(0.16666666666666666 * x + 0.5), $MachinePrecision] * x + 1.0), $MachinePrecision] * x), $MachinePrecision]
\begin{array}{l} \mathsf{fma}\left(\mathsf{fma}\left(0.16666666666666666, x, 0.5\right), x, 1\right) \cdot x \end{array}
Initial program 34.5%
Taylor expanded in x around 0
*-commutative
N/A
lower-*.f64
N/A
+-commutative
N/A
*-commutative
N/A
lower-fma.f64
N/A
+-commutative
N/A
lower-fma.f64
71.6
Applied rewrites: 71.6%
; Alternative: degree-2 Taylor polynomial of exp(x) - 1 around 0,
; x*(1 + x/2), via fma; report accuracy: 70.7%.
(FPCore (x) :precision binary64 (* (fma 0.5 x 1.0) x))
/* Degree-2 Taylor polynomial of exp(x) - 1: x*(1 + x/2), via fma. */
double code(double x) { return fma(0.5, x, 1.0) * x; }
# Degree-2 Taylor polynomial of exp(x) - 1: x*(1 + x/2), via fma.
function code(x) return Float64(fma(0.5, x, 1.0) * x) end
(* Degree-2 Taylor polynomial of exp(x) - 1: (x/2 + 1)*x, machine precision. *)
code[x_] := N[(N[(0.5 * x + 1.0), $MachinePrecision] * x), $MachinePrecision]
\begin{array}{l} \mathsf{fma}\left(0.5, x, 1\right) \cdot x \end{array}
Initial program 34.5%
Taylor expanded in x around 0
*-commutative
N/A
lower-*.f64
N/A
+-commutative
N/A
lower-fma.f64
70.7
Applied rewrites: 70.7%
; Alternative: degree-1 Taylor form left as (1 + x) - 1; in floats this
; is fl(1+x) - 1, not x; report accuracy: 6.4%.
(FPCore (x) :precision binary64 (- (+ 1.0 x) 1.0))
/* Degree-1 Taylor form (1 + x) - 1; rounds to fl(1+x) - 1, dropping low bits of small x. */
double code(double x) { return (1.0 + x) - 1.0; }
! Degree-1 Taylor form (1 + x) - 1; rounds to fl(1+x) - 1.
! (Restored to valid multi-line free-form layout; the report dump had
! collapsed the statements onto one line.)
real(8) function code(x)
    real(8), intent (in) :: x
    code = (1.0d0 + x) - 1.0d0
end function
// Degree-1 Taylor form (1 + x) - 1; rounds to fl(1+x) - 1, dropping low bits of small x.
public static double code(double x) { return (1.0 + x) - 1.0; }
# Degree-1 Taylor form (1.0 + x) - 1.0; rounds to fl(1+x) - 1, dropping low bits of small x.
def code(x): return (1.0 + x) - 1.0
# Degree-1 Taylor form (1 + x) - 1; rounds to fl(1+x) - 1, dropping low bits of small x.
function code(x) return Float64(Float64(1.0 + x) - 1.0) end
% Degree-1 Taylor form (1 + x) - 1; rounds to fl(1+x) - 1.
% (Restored to valid multi-line form; the report dump had collapsed the
% function onto one line.)
function tmp = code(x)
    tmp = (1.0 + x) - 1.0;
end
(* Degree-1 Taylor form (1 + x) - 1, machine precision at each step. *)
code[x_] := N[(N[(1.0 + x), $MachinePrecision] - 1.0), $MachinePrecision]
\begin{array}{l} \left(1 + x\right) - 1 \end{array}
Initial program 34.5%
Taylor expanded in x around 0
lower-+.f64
6.4
Applied rewrites: 6.4%
; Alternative: degree-0 Taylor expansion, the constant 0 written as
; 1 - 1 (x is unused); report accuracy: 4.5%.
(FPCore (x) :precision binary64 (- 1.0 1.0))
/* Degree-0 Taylor expansion: constant 0 (parameter x unused). */
double code(double x) { return 1.0 - 1.0; }
! Degree-0 Taylor expansion: constant 0 (argument x unused).
! (Restored to valid multi-line free-form layout; the report dump had
! collapsed the statements onto one line.)
real(8) function code(x)
    real(8), intent (in) :: x
    code = 1.0d0 - 1.0d0
end function
// Degree-0 Taylor expansion: constant 0 (parameter x unused).
public static double code(double x) { return 1.0 - 1.0; }
# Degree-0 Taylor expansion: constant 0.0 (parameter x unused).
def code(x): return 1.0 - 1.0
# Degree-0 Taylor expansion: constant 0.0 (parameter x unused).
function code(x) return Float64(1.0 - 1.0) end
% Degree-0 Taylor expansion: constant 0 (parameter x unused).
% (Restored to valid multi-line form; the report dump had collapsed the
% function onto one line.)
function tmp = code(x)
    tmp = 1.0 - 1.0;
end
(* Degree-0 Taylor expansion: constant 0 (pattern x unused). *)
code[x_] := N[(1.0 - 1.0), $MachinePrecision]
\begin{array}{l} 1 - 1 \end{array}
Initial program 34.5%
Taylor expanded in x around 0
Applied rewrites: 4.5%
herbie shell --seed 1
; Full job specification from the report footer.
; Reproduce with: herbie shell --seed 1
(FPCore (x)
; report title
:name "exp(x)-1"
:precision binary64
; sampled input domain: -1.79e308 <= x <= 1e9
:pre (and (<= -1.79e+308 x) (<= x 1000000000.0))
(- (exp x) 1.0))