;; tanh(x) expressed as (e^x - e^-x) / (e^x + e^-x); exp overflows for large |x|
(FPCore (x) :precision binary64 (let* ((t_0 (exp (- x)))) (/ (- (exp x) t_0) (+ (exp x) t_0))))
double code(double x) { double t_0 = exp(-x); return (exp(x) - t_0) / (exp(x) + t_0); }
! tanh(x) via the exponential definition (e^x - e^-x) / (e^x + e^-x).
! Fixed: the original had all statements collapsed onto the declaration
! line, which is not valid free-form Fortran.
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: t_0
    t_0 = exp(-x)
    code = (exp(x) - t_0) / (exp(x) + t_0)
end function
/**
 * tanh(x) computed from its exponential definition (e^x - e^-x) / (e^x + e^-x).
 * NOTE(review): Math.exp(x) overflows to infinity for x > ~709, yielding NaN there.
 */
public static double code(double x) {
    final double ePos = Math.exp(x);
    final double eNeg = Math.exp(-x);
    return (ePos - eNeg) / (ePos + eNeg);
}
def code(x):
    """tanh(x) computed as (e^x - e^-x) / (e^x + e^-x).

    Fixed: the original had both statements collapsed onto the ``def``
    line, which is a SyntaxError.

    NOTE(review): math.exp raises OverflowError for x > ~709, so very
    large |x| fail here instead of saturating to +/-1 like math.tanh.
    """
    t_0 = math.exp(-x)
    return (math.exp(x) - t_0) / (math.exp(x) + t_0)
# tanh(x) as (e^x - e^-x) / (e^x + e^-x).
# Fixed: the original had two statements juxtaposed on one line with no
# separator, which does not parse.
function code(x)
    t_0 = exp(Float64(-x))
    return Float64(Float64(exp(x) - t_0) / Float64(exp(x) + t_0))
end
% tanh(x) via the exponential definition (e^x - e^-x) / (e^x + e^-x).
% Fixed: the original put statements on the function declaration line,
% which MATLAB rejects.
function tmp = code(x)
    t_0 = exp(-x);
    tmp = (exp(x) - t_0) / (exp(x) + t_0);
end
(* tanh(x) via the exponential definition (e^x - e^-x) / (e^x + e^-x), at machine precision. *)
code[x_] := Block[{t$95$0 = N[Exp[(-x)], $MachinePrecision]}, N[(N[(N[Exp[x], $MachinePrecision] - t$95$0), $MachinePrecision] / N[(N[Exp[x], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l} \\ \begin{array}{l} t\_0 := e^{-x}\\ \frac{e^{x} - t\_0}{e^{x} + t\_0} \end{array} \end{array}
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
;; tanh(x) expressed as (e^x - e^-x) / (e^x + e^-x); exp overflows for large |x|
(FPCore (x) :precision binary64 (let* ((t_0 (exp (- x)))) (/ (- (exp x) t_0) (+ (exp x) t_0))))
/* tanh(x) via (e^x - e^-x) / (e^x + e^-x); exp(x) overflows to +inf near x ~ 709, making this inf/inf = NaN. */
double code(double x) { double t_0 = exp(-x); return (exp(x) - t_0) / (exp(x) + t_0); }
! tanh(x) via the exponential definition (e^x - e^-x) / (e^x + e^-x).
! Fixed: the original had all statements collapsed onto the declaration
! line, which is not valid free-form Fortran.
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: t_0
    t_0 = exp(-x)
    code = (exp(x) - t_0) / (exp(x) + t_0)
end function
/** tanh(x) via (e^x - e^-x) / (e^x + e^-x); Math.exp(x) overflows to infinity for x > ~709, yielding NaN. */
public static double code(double x) { double t_0 = Math.exp(-x); return (Math.exp(x) - t_0) / (Math.exp(x) + t_0); }
def code(x):
    """tanh(x) computed as (e^x - e^-x) / (e^x + e^-x).

    Fixed: the original had both statements collapsed onto the ``def``
    line, which is a SyntaxError.

    NOTE(review): math.exp raises OverflowError for x > ~709, so very
    large |x| fail here instead of saturating to +/-1 like math.tanh.
    """
    t_0 = math.exp(-x)
    return (math.exp(x) - t_0) / (math.exp(x) + t_0)
# tanh(x) as (e^x - e^-x) / (e^x + e^-x).
# Fixed: the original had two statements juxtaposed on one line with no
# separator, which does not parse.
function code(x)
    t_0 = exp(Float64(-x))
    return Float64(Float64(exp(x) - t_0) / Float64(exp(x) + t_0))
end
% tanh(x) via the exponential definition (e^x - e^-x) / (e^x + e^-x).
% Fixed: the original put statements on the function declaration line,
% which MATLAB rejects.
function tmp = code(x)
    t_0 = exp(-x);
    tmp = (exp(x) - t_0) / (exp(x) + t_0);
end
(* tanh(x) via the exponential definition (e^x - e^-x) / (e^x + e^-x), at machine precision. *)
code[x_] := Block[{t$95$0 = N[Exp[(-x)], $MachinePrecision]}, N[(N[(N[Exp[x], $MachinePrecision] - t$95$0), $MachinePrecision] / N[(N[Exp[x], $MachinePrecision] + t$95$0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l} \\ \begin{array}{l} t\_0 := e^{-x}\\ \frac{e^{x} - t\_0}{e^{x} + t\_0} \end{array} \end{array}
;; Same function written directly as tanh(x) — avoids the exp overflow of the quotient form
(FPCore (x) :precision binary64 (tanh x))
/* Hyperbolic tangent of x, via the C library tanh. */
double code(double x) { return tanh(x); }
! Direct hyperbolic tangent of x.
! Fixed: the original had all statements collapsed onto the declaration
! line, which is not valid free-form Fortran.
real(8) function code(x)
    real(8), intent (in) :: x
    code = tanh(x)
end function
/** Hyperbolic tangent of x (delegates to Math.tanh). */
public static double code(double x) { return Math.tanh(x); }
def code(x):
    """Return the hyperbolic tangent of x."""
    return math.tanh(x)
# Direct hyperbolic tangent of x.
function code(x)
    return tanh(x)
end
% Direct hyperbolic tangent of x.
% Fixed: the original put statements on the function declaration line,
% which MATLAB rejects.
function tmp = code(x)
    tmp = tanh(x);
end
(* Direct hyperbolic tangent of x at machine precision. *)
code[x_] := N[Tanh[x], $MachinePrecision]
\begin{array}{l} \\ \tanh x \end{array}
Initial program 9.8%
lift-/.f64
N/A
lift--.f64
N/A
lift-exp.f64
N/A
lift-exp.f64
N/A
lift-neg.f64
N/A
lift-+.f64
N/A
lift-exp.f64
N/A
lift-exp.f64
N/A
lift-neg.f64
N/A
tanh-undef
N/A
lower-tanh.f64
100.0
Applied rewrites 100.0%
;; Degree-5 Taylor polynomial of tanh about 0: x - x^3/3 + 2x^5/15, evaluated with fma
(FPCore (x) :precision binary64 (fma (* (fma (* x x) 0.13333333333333333 -0.3333333333333333) x) (* x x) x))
/* Degree-5 odd polynomial x - x^3/3 + 2x^5/15 (tanh Taylor series about 0), evaluated with fma. */
double code(double x) { return fma((fma((x * x), 0.13333333333333333, -0.3333333333333333) * x), (x * x), x); }
# Degree-5 odd polynomial x - x^3/3 + 2x^5/15 (tanh Taylor series about 0),
# evaluated with fused multiply-adds.
function code(x)
    inner = fma(Float64(x * x), 0.13333333333333333, -0.3333333333333333)
    return fma(Float64(inner * x), Float64(x * x), x)
end
(* Degree-5 odd polynomial x - x^3/3 + 2x^5/15 (tanh Taylor series about 0), at machine precision. *)
code[x_] := N[(N[(N[(N[(x * x), $MachinePrecision] * 0.13333333333333333 + -0.3333333333333333), $MachinePrecision] * x), $MachinePrecision] * N[(x * x), $MachinePrecision] + x), $MachinePrecision]
\begin{array}{l} \\ \mathsf{fma}\left(\mathsf{fma}\left(x \cdot x, 0.13333333333333333, -0.3333333333333333\right) \cdot x, x \cdot x, x\right) \end{array}
Initial program 9.8%
Taylor expanded in x around 0
+-commutative
N/A
distribute-lft-in
N/A
associate-*r*
N/A
*-rgt-identity
N/A
lower-fma.f64
N/A
*-commutative
N/A
pow-plus
N/A
lower-pow.f64
N/A
metadata-eval
N/A
sub-neg
N/A
metadata-eval
N/A
lower-fma.f64
N/A
unpow2
N/A
lower-*.f64
99.0
Applied rewrites 99.0%
Applied rewrites 99.0%
;; Cubic Taylor polynomial of tanh about 0: x - x^3/3, evaluated with a single fma
(FPCore (x) :precision binary64 (fma (* x x) (* x -0.3333333333333333) x))
/* Cubic Taylor approximation x - x^3/3 of tanh about 0, via a single fma. */
double code(double x) { return fma((x * x), (x * -0.3333333333333333), x); }
# Cubic Taylor approximation x - x^3/3 of tanh about 0, via a single
# fused multiply-add.
function code(x)
    return fma(Float64(x * x), Float64(x * -0.3333333333333333), x)
end
(* Cubic Taylor approximation x - x^3/3 of tanh about 0, at machine precision. *)
code[x_] := N[(N[(x * x), $MachinePrecision] * N[(x * -0.3333333333333333), $MachinePrecision] + x), $MachinePrecision]
\begin{array}{l} \\ \mathsf{fma}\left(x \cdot x, x \cdot -0.3333333333333333, x\right) \end{array}
Initial program 9.8%
Taylor expanded in x around 0
+-commutative
N/A
distribute-lft-in
N/A
*-commutative
N/A
associate-*r*
N/A
*-rgt-identity
N/A
lower-fma.f64
N/A
*-commutative
N/A
pow-plus
N/A
lower-pow.f64
N/A
metadata-eval
98.5
Applied rewrites 98.5%
Applied rewrites 98.5%
herbie shell --seed 1
;; Input specification: tanh(x) via exponentials, sampled over -1000 <= x <= 1000
;; (exp overflows well inside this range, which is why Herbie rewrites it)
(FPCore (x)
:name "(exp(x) - exp(-x)) / (exp(x) + exp(-x))"
:precision binary64
:pre (and (<= -1000.0 x) (<= x 1000.0))
(/ (- (exp x) (exp (- x))) (+ (exp x) (exp (- x)))))