; Initial program: e^(a*x) - 1 computed naively; the report below shows it loses accuracy (49.7%).
(FPCore (a x) :precision binary64 (- (exp (* a x)) 1.0))
double code(double a, double x) { return exp((a * x)) - 1.0; }
! Computes e^(a*x) - 1 in double precision (naive form).
! Reformatted: the original jammed every statement onto one line, which is
! not valid free-form Fortran (statements need newlines or ';' separators).
real(8) function code(a, x)
    real(8), intent (in) :: a
    real(8), intent (in) :: x
    code = exp((a * x)) - 1.0d0
end function
/** Computes e^(a*x) - 1 in double precision (naive form). */
public static double code(double a, double x) {
    final double t = a * x;
    return Math.exp(t) - 1.0;
}
def code(a, x):
    """Compute exp(a*x) - 1 in double precision (naive form)."""
    product = a * x
    return math.exp(product) - 1.0
# Computes e^(a*x) - 1 in Float64 (naive form).
function code(a, x)
    t = Float64(a * x)
    return Float64(exp(t) - 1.0)
end
% Computes exp(a*x) - 1 in double precision (naive form).
% Reformatted: the original put the function header and body on one line
% with no separators, which MATLAB does not parse.
function tmp = code(a, x)
    tmp = exp((a * x)) - 1.0;
end
(* e^(a*x) - 1, with each intermediate re-rounded to machine precision via N. *)
code[a_, x_] := N[(N[Exp[N[(a * x), $MachinePrecision]], $MachinePrecision] - 1.0), $MachinePrecision]
\begin{array}{l} \\ e^{a \cdot x} - 1 \end{array}
Sampling outcomes in binary64 precision:
Herbie found 4 alternatives:
| Alternative | Accuracy | Speedup |
| --- | --- | --- |
; Initial program (repeated for the alternatives table): e^(a*x) - 1, naive form.
(FPCore (a x) :precision binary64 (- (exp (* a x)) 1.0))
double code(double a, double x) { return exp((a * x)) - 1.0; }
! Computes e^(a*x) - 1 in double precision (naive form).
! Reformatted: single-line statement concatenation is not valid free-form Fortran.
real(8) function code(a, x)
    real(8), intent (in) :: a
    real(8), intent (in) :: x
    code = exp((a * x)) - 1.0d0
end function
/** Computes e^(a*x) - 1 in double precision (naive form). */
public static double code(double a, double x) {
    final double t = a * x;
    return Math.exp(t) - 1.0;
}
def code(a, x):
    """Compute exp(a*x) - 1 in double precision (naive form)."""
    product = a * x
    return math.exp(product) - 1.0
# Computes e^(a*x) - 1 in Float64 (naive form).
function code(a, x)
    t = Float64(a * x)
    return Float64(exp(t) - 1.0)
end
% Computes exp(a*x) - 1 in double precision (naive form).
% Reformatted: one-line function definition without separators is invalid MATLAB.
function tmp = code(a, x)
    tmp = exp((a * x)) - 1.0;
end
(* e^(a*x) - 1, with each intermediate re-rounded to machine precision via N. *)
code[a_, x_] := N[(N[Exp[N[(a * x), $MachinePrecision]], $MachinePrecision] - 1.0), $MachinePrecision]
\begin{array}{l} \\ e^{a \cdot x} - 1 \end{array}
; Alternative: the fused library primitive expm1(x*a) == e^(x*a) - 1, accurate near zero.
(FPCore (a x) :precision binary64 (expm1 (* x a)))
double code(double a, double x) { return expm1((x * a)); }
/** Computes e^(x*a) - 1 via Math.expm1, accurate for small arguments. */
public static double code(double a, double x) {
    final double t = x * a;
    return Math.expm1(t);
}
def code(a, x):
    """Compute exp(x*a) - 1 via math.expm1, accurate for small arguments."""
    product = x * a
    return math.expm1(product)
# Computes e^(x*a) - 1 via expm1, accurate for small arguments.
function code(a, x)
    t = Float64(x * a)
    return expm1(t)
end
(* expm1(x*a) rendered as Exp[x*a] - 1; there is no separate expm1 primitive here. *)
code[a_, x_] := N[(Exp[N[(x * a), $MachinePrecision]] - 1), $MachinePrecision]
\begin{array}{l} \\ \mathsf{expm1}\left(x \cdot a\right) \end{array}
Initial program 49.7%
lift--.f64
N/A
lift-exp.f64
N/A
lower-expm1.f64
100.0
lift-*.f64
N/A
*-commutative
N/A
lower-*.f64
100.0
Applied rewrites 100.0%
; Alternative: degree-3 Taylor polynomial of expm1(a*x) about a = 0, evaluated with fma.
(FPCore (a x) :precision binary64 (* (fma (* (fma (* 0.16666666666666666 x) a 0.5) a) x 1.0) (* x a)))
double code(double a, double x) { return fma((fma((0.16666666666666666 * x), a, 0.5) * a), x, 1.0) * (x * a); }
# Degree-3 Taylor polynomial of expm1(x*a), evaluated with fused multiply-adds
# in the same operation order as the original one-liner.
function code(a, x)
    inner = Float64(fma(Float64(0.16666666666666666 * x), a, 0.5) * a)
    poly = fma(inner, x, 1.0)
    return Float64(poly * Float64(x * a))
end
(* Degree-3 Taylor polynomial of expm1(x*a), each operation re-rounded via N. *)
code[a_, x_] := N[(N[(N[(N[(N[(0.16666666666666666 * x), $MachinePrecision] * a + 0.5), $MachinePrecision] * a), $MachinePrecision] * x + 1.0), $MachinePrecision] * N[(x * a), $MachinePrecision]), $MachinePrecision]
\begin{array}{l} \\ \mathsf{fma}\left(\mathsf{fma}\left(0.16666666666666666 \cdot x, a, 0.5\right) \cdot a, x, 1\right) \cdot \left(x \cdot a\right) \end{array}
Initial program 49.7%
Taylor expanded in a around 0
Applied rewrites 71.7%
; Alternative: first-order Taylor approximation, e^(a*x) - 1 ≈ x*a near a = 0.
(FPCore (a x) :precision binary64 (* x a))
/* First-order Taylor approximation of e^(a*x) - 1: just x*a. */
double code(double a, double x) {
    const double product = x * a;
    return product;
}
! First-order Taylor approximation of e^(a*x) - 1: just x*a.
! Reformatted: single-line statement concatenation is not valid free-form Fortran.
real(8) function code(a, x)
    real(8), intent (in) :: a
    real(8), intent (in) :: x
    code = x * a
end function
/** First-order Taylor approximation of e^(a*x) - 1: just x*a. */
public static double code(double a, double x) {
    final double product = x * a;
    return product;
}
def code(a, x):
    """First-order Taylor approximation of exp(a*x) - 1: just x*a."""
    product = x * a
    return product
# First-order Taylor approximation of e^(a*x) - 1: just x*a.
function code(a, x)
    product = Float64(x * a)
    return product
end
% First-order Taylor approximation of exp(a*x) - 1: just x*a.
% Reformatted: one-line function definition without separators is invalid MATLAB.
function tmp = code(a, x)
    tmp = x * a;
end
(* First-order approximation: x*a rounded to machine precision. *)
code[a_, x_] := N[(x * a), $MachinePrecision]
\begin{array}{l} \\ x \cdot a \end{array}
Initial program 49.7%
Taylor expanded in a around 0
*-commutative
N/A
lower-*.f64
70.7
Applied rewrites 70.7%
; Alternative: zeroth-order Taylor approximation — the constant 0, written as 1 - 1.
(FPCore (a x) :precision binary64 (- 1.0 1.0))
/* Zeroth-order Taylor approximation: always 0.0 (written 1.0 - 1.0);
   the parameters are intentionally unused. */
double code(double a, double x) {
    const double result = 1.0 - 1.0;
    return result;
}
! Zeroth-order Taylor approximation: always 0 (written 1 - 1); arguments unused.
! Reformatted: single-line statement concatenation is not valid free-form Fortran.
real(8) function code(a, x)
    real(8), intent (in) :: a
    real(8), intent (in) :: x
    code = 1.0d0 - 1.0d0
end function
/** Zeroth-order Taylor approximation: always 0.0 (written 1.0 - 1.0); arguments unused. */
public static double code(double a, double x) {
    final double result = 1.0 - 1.0;
    return result;
}
def code(a, x):
    """Zeroth-order Taylor approximation: always 0.0 (1.0 - 1.0); arguments unused."""
    result = 1.0 - 1.0
    return result
# Zeroth-order Taylor approximation: always 0.0 (1.0 - 1.0); arguments unused.
function code(a, x)
    result = Float64(1.0 - 1.0)
    return result
end
% Zeroth-order Taylor approximation: always 0.0 (1.0 - 1.0); arguments unused.
% Reformatted: one-line function definition without separators is invalid MATLAB.
function tmp = code(a, x)
    tmp = 1.0 - 1.0;
end
(* Zeroth-order approximation: constant 0 (1.0 - 1.0); arguments unused. *)
code[a_, x_] := N[(1.0 - 1.0), $MachinePrecision]
\begin{array}{l} \\ 1 - 1 \end{array}
Initial program 49.7%
Taylor expanded in a around 0
Applied rewrites 19.0%
; Alternative: expm1(a*x) — the first expm1 form with operands in the original order.
(FPCore (a x) :precision binary64 (expm1 (* a x)))
double code(double a, double x) { return expm1((a * x)); }
/** Computes e^(a*x) - 1 via Math.expm1, accurate for small arguments. */
public static double code(double a, double x) {
    final double t = a * x;
    return Math.expm1(t);
}
def code(a, x):
    """Compute exp(a*x) - 1 via math.expm1, accurate for small arguments."""
    product = a * x
    return math.expm1(product)
# Computes e^(a*x) - 1 via expm1, accurate for small arguments.
function code(a, x)
    t = Float64(a * x)
    return expm1(t)
end
(* expm1(a*x) rendered as Exp[a*x] - 1; there is no separate expm1 primitive here. *)
code[a_, x_] := N[(Exp[N[(a * x), $MachinePrecision]] - 1), $MachinePrecision]
\begin{array}{l} \\ \mathsf{expm1}\left(a \cdot x\right) \end{array}
herbie shell --seed 1
; Herbie job specification for benchmark "expax (section 3.5)": e^(a*x) - 1.
; :pre bounds a*x below 710 so exp(a*x) stays finite in binary64
; (exp overflows for arguments above ~709.78).
; :alt supplies the known-good rewrite expm1(a*x) for comparison.
(FPCore (a x)
:name "expax (section 3.5)"
:precision binary64
:pre (> 710.0 (* a x))
:alt
(! :herbie-platform default (expm1 (* a x)))
(- (exp (* a x)) 1.0))