(FPCore (x) :precision binary64 (tanh x))
double code(double x) { return tanh(x); }
real(8) function code(x) real(8), intent (in) :: x code = tanh(x) end function
public static double code(double x) { return Math.tanh(x); }
def code(x): return math.tanh(x)
function code(x) return tanh(x) end
function tmp = code(x) tmp = tanh(x); end
code[x_] := N[Tanh[x], $MachinePrecision]
\begin{array}{l} \\ \tanh x \end{array}
Sampling outcomes in binary64 precision:
Herbie found 3 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
(FPCore (x) :precision binary64 (tanh x))
double code(double x) { return tanh(x); }
real(8) function code(x) real(8), intent (in) :: x code = tanh(x) end function
public static double code(double x) { return Math.tanh(x); }
def code(x): return math.tanh(x)
function code(x) return tanh(x) end
function tmp = code(x) tmp = tanh(x); end
code[x_] := N[Tanh[x], $MachinePrecision]
\begin{array}{l} \\ \tanh x \end{array}
(FPCore (x) :precision binary64 (tanh x))
double code(double x) { return tanh(x); }
real(8) function code(x) real(8), intent (in) :: x code = tanh(x) end function
public static double code(double x) { return Math.tanh(x); }
def code(x): return math.tanh(x)
function code(x) return tanh(x) end
function tmp = code(x) tmp = tanh(x); end
code[x_] := N[Tanh[x], $MachinePrecision]
\begin{array}{l} \\ \tanh x \end{array}
Initial program 100.0%
(FPCore (x) :precision binary64 (fma (* (* x x) x) -0.3333333333333333 x))
double code(double x) { return fma(((x * x) * x), -0.3333333333333333, x); }
function code(x) return fma(Float64(Float64(x * x) * x), -0.3333333333333333, x) end
code[x_] := N[(N[(N[(x * x), $MachinePrecision] * x), $MachinePrecision] * -0.3333333333333333 + x), $MachinePrecision]
\begin{array}{l} \\ \mathsf{fma}\left(\left(x \cdot x\right) \cdot x, -0.3333333333333333, x\right) \end{array}
Initial program 100.0%
Taylor expanded in x around 0
+-commutative
N/A
distribute-lft-in
N/A
*-commutative
N/A
associate-*r*
N/A
*-rgt-identity
N/A
lower-fma.f64
N/A
*-commutative
N/A
pow-plus
N/A
lower-pow.f64
N/A
metadata-eval
98.4
Applied rewrites 98.4%
Applied rewrites 98.4%
(FPCore (x) :precision binary64 (* (fma (* -0.3333333333333333 x) x 1.0) x))
double code(double x) { return fma((-0.3333333333333333 * x), x, 1.0) * x; }
function code(x) return Float64(fma(Float64(-0.3333333333333333 * x), x, 1.0) * x) end
code[x_] := N[(N[(N[(-0.3333333333333333 * x), $MachinePrecision] * x + 1.0), $MachinePrecision] * x), $MachinePrecision]
\begin{array}{l} \\ \mathsf{fma}\left(-0.3333333333333333 \cdot x, x, 1\right) \cdot x \end{array}
Initial program 100.0%
Taylor expanded in x around 0
+-commutative
N/A
distribute-lft-in
N/A
*-commutative
N/A
associate-*r*
N/A
*-rgt-identity
N/A
lower-fma.f64
N/A
*-commutative
N/A
pow-plus
N/A
lower-pow.f64
N/A
metadata-eval
98.4
Applied rewrites 98.4%
Applied rewrites 98.4%
herbie shell --seed 1
(FPCore (x)
:name "tanh(x)"
:precision binary64
:pre (and (<= 0.0 x) (<= x 150.0))
(tanh x))