Initial program:
(FPCore (a) :precision binary64 (+ (* (tanh (* a 200.0)) 0.9) (* (/ a (fabs a)) 0.1)))
double code(double a) { return (tanh((a * 200.0)) * 0.9) + ((a / fabs(a)) * 0.1); }
real(8) function code(a)
    real(8), intent (in) :: a
    code = (tanh((a * 200.0d0)) * 0.9d0) + ((a / abs(a)) * 0.1d0)
end function
public static double code(double a) { return (Math.tanh((a * 200.0)) * 0.9) + ((a / Math.abs(a)) * 0.1); }
def code(a): return (math.tanh((a * 200.0)) * 0.9) + ((a / math.fabs(a)) * 0.1)
function code(a)
    return Float64(Float64(tanh(Float64(a * 200.0)) * 0.9) + Float64(Float64(a / abs(a)) * 0.1))
end
function tmp = code(a)
    tmp = (tanh((a * 200.0)) * 0.9) + ((a / abs(a)) * 0.1);
end
code[a_] := N[(N[(N[Tanh[N[(a * 200.0), $MachinePrecision]], $MachinePrecision] * 0.9), $MachinePrecision] + N[(N[(a / N[Abs[a], $MachinePrecision]), $MachinePrecision] * 0.1), $MachinePrecision]), $MachinePrecision]
\tanh \left(a \cdot 200\right) \cdot 0.9 + \frac{a}{\left|a\right|} \cdot 0.1
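Two properties of this expression are worth making explicit: a / fabs(a) is a sign function that yields NaN at a = 0, and tanh(a * 200.0) saturates to +/-1 once |a| exceeds a few hundredths, so away from zero the whole expression approximates sign(a). The following C harness is my own sketch, not part of the Herbie report:

#include <math.h>
#include <stdio.h>

/* The original expression, exactly as in the C translation above. */
static double code(double a) {
    return (tanh((a * 200.0)) * 0.9) + ((a / fabs(a)) * 0.1);
}

int main(void) {
    printf("code(1.0)  = %.17g\n", code(1.0));   /* ~ +1.0: tanh saturated */
    printf("code(-1.0) = %.17g\n", code(-1.0));  /* ~ -1.0 */
    printf("code(1e-3) = %.17g\n", code(1e-3));  /* transition region near 0 */
    printf("code(0.0)  = %.17g\n", code(0.0));   /* 0.0/0.0 -> NaN */
    return 0;
}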
[Sampling outcomes in binary64 precision: outcome counts not preserved.]
Herbie found 4 alternatives:
[Table of alternatives (accuracy, speedup) not preserved; each alternative follows with its derivation.]
Alternative 1:
(FPCore (a) :precision binary64 (fma (/ a (fabs a)) 0.1 (* 0.9 (tanh (* 200.0 a)))))
double code(double a) { return fma((a / fabs(a)), 0.1, (0.9 * tanh((200.0 * a)))); }
function code(a)
    return fma(Float64(a / abs(a)), 0.1, Float64(0.9 * tanh(Float64(200.0 * a))))
end
code[a_] := N[(N[(a / N[Abs[a], $MachinePrecision]), $MachinePrecision] * 0.1 + N[(0.9 * N[Tanh[N[(200.0 * a), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\mathsf{fma}\left(\frac{a}{\left|a\right|}, 0.1, 0.9 \cdot \tanh \left(200 \cdot a\right)\right)
Derivation (rewrite rule: accuracy after the step):
Initial program: 100.0%
lift-+.f64: N/A
+-commutative: N/A
lift-*.f64: N/A
lower-fma.f64: 100.0%
lift-*.f64: N/A
*-commutative: N/A
lower-*.f64: 100.0%
lift-*.f64: N/A
*-commutative: N/A
lower-*.f64: 100.0%
Applied rewrites: 100.0%
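For reference, fma(x, y, z) evaluates x * y + z with a single rounding instead of two, which is why Herbie can lower the sum to an fma without losing accuracy (100.0% throughout the derivation above). A standalone sketch of the single-rounding effect, mine rather than the report's:

#include <math.h>
#include <stdio.h>

int main(void) {
    double x = 1.0 + 0x1p-30, y = 1.0 - 0x1p-30, z = -1.0;
    /* x * y is exactly 1 - 2^-60; the unfused product rounds to 1.0 first. */
    printf("x*y + z      = %.17g\n", x * y + z);    /* 0.0 */
    printf("fma(x, y, z) = %.17g\n", fma(x, y, z)); /* -2^-60 */
    return 0;
}

fma is C99 <math.h>; on targets without a hardware fused multiply-add it may be emulated in software and correspondingly slower.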
Alternative 2:
(FPCore (a) :precision binary64 (* (/ a (fabs a)) 0.1))
double code(double a) { return (a / fabs(a)) * 0.1; }
real(8) function code(a)
    real(8), intent (in) :: a
    code = (a / abs(a)) * 0.1d0
end function
public static double code(double a) { return (a / Math.abs(a)) * 0.1; }
def code(a): return (a / math.fabs(a)) * 0.1
function code(a)
    return Float64(Float64(a / abs(a)) * 0.1)
end
function tmp = code(a)
    tmp = (a / abs(a)) * 0.1;
end
code[a_] := N[(N[(a / N[Abs[a], $MachinePrecision]), $MachinePrecision] * 0.1), $MachinePrecision]
\frac{a}{\left|a\right|} \cdot 0.1
Derivation (rewrite rule: accuracy after the step):
Initial program: 100.0%
Taylor expanded in a around inf
*-lft-identity: N/A
associate-*l/: N/A
associate-*l*: N/A
lower-*.f64: N/A
associate-*r/: N/A
metadata-eval: N/A
lower-/.f64: N/A
lower-fabs.f64: 57.6%
Applied rewrites: 57.6%
Applied rewrites: 57.7%
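Dropping the tanh term leaves 0.1 * sign(a), which agrees with the original only while the tanh contribution is negligible (very small |a|); once tanh saturates, the two differ by 0.9 in magnitude, consistent with the drop to 57.6%. A quick comparison, my own sketch with hypothetical helper names orig and alt:

#include <math.h>
#include <stdio.h>

static double orig(double a) {  /* the initial program */
    return (tanh((a * 200.0)) * 0.9) + ((a / fabs(a)) * 0.1);
}
static double alt(double a) {   /* this alternative */
    return (a / fabs(a)) * 0.1;
}

int main(void) {
    const double pts[] = { 1e-9, 1e-3, 1.0, 1e9 };
    for (int i = 0; i < 4; i++)
        printf("a = %-5g  orig = %.17g  alt = %.17g\n",
               pts[i], orig(pts[i]), alt(pts[i]));
    return 0;
}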
Alternative 3:
(FPCore (a) :precision binary64 (* (/ 0.1 (fabs a)) a))
double code(double a) { return (0.1 / fabs(a)) * a; }
real(8) function code(a)
    real(8), intent (in) :: a
    code = (0.1d0 / abs(a)) * a
end function
public static double code(double a) { return (0.1 / Math.abs(a)) * a; }
def code(a): return (0.1 / math.fabs(a)) * a
function code(a)
    return Float64(Float64(0.1 / abs(a)) * a)
end
function tmp = code(a)
    tmp = (0.1 / abs(a)) * a;
end
code[a_] := N[(N[(0.1 / N[Abs[a], $MachinePrecision]), $MachinePrecision] * a), $MachinePrecision]
\frac{0.1}{\left|a\right|} \cdot a
Derivation (rewrite rule: accuracy after the step):
Initial program: 100.0%
Taylor expanded in a around inf
*-lft-identity: N/A
associate-*l/: N/A
associate-*l*: N/A
lower-*.f64: N/A
associate-*r/: N/A
metadata-eval: N/A
lower-/.f64: N/A
lower-fabs.f64: 57.6%
Applied rewrites: 57.6%
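Alternatives 2 and 3 are algebraically identical; they differ only in whether a or the constant 0.1 is divided by |a|. Near the ends of the binary64 range the order can matter: a / fabs(a) is always exactly +/-1, while 0.1 / fabs(a) can go subnormal before the final multiply. A small sketch of mine, not from the report:

#include <math.h>
#include <stdio.h>

int main(void) {
    double a = 1e308;
    /* a / |a| is exact (+/-1), so this form returns the double 0.1 exactly. */
    printf("(a/|a|)*0.1 = %.17g\n", (a / fabs(a)) * 0.1);
    /* 0.1 / 1e308 ~ 1e-309 is subnormal; low bits may be lost before
       multiplying back up. */
    printf("(0.1/|a|)*a = %.17g\n", (0.1 / fabs(a)) * a);
    return 0;
}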
Alternative 4:
(FPCore (a) :precision binary64 (* (* (* a a) -2400000.0) a))
double code(double a) { return ((a * a) * -2400000.0) * a; }
real(8) function code(a)
    real(8), intent (in) :: a
    code = ((a * a) * (-2400000.0d0)) * a
end function
public static double code(double a) { return ((a * a) * -2400000.0) * a; }
def code(a): return ((a * a) * -2400000.0) * a
function code(a)
    return Float64(Float64(Float64(a * a) * -2400000.0) * a)
end
function tmp = code(a)
    tmp = ((a * a) * -2400000.0) * a;
end
code[a_] := N[(N[(N[(a * a), $MachinePrecision] * -2400000.0), $MachinePrecision] * a), $MachinePrecision]
\left(\left(a \cdot a\right) \cdot -2400000\right) \cdot a
Derivation (rewrite rule: accuracy after the step):
Initial program: 100.0%
Taylor expanded in a around 0
*-commutative: N/A
lower-*.f64: N/A
+-commutative: N/A
associate-+l+: N/A
*-commutative: N/A
+-commutative: N/A
lower-fma.f64: N/A
unpow2: N/A
lower-*.f64: N/A
+-commutative: N/A
lower-+.f64: N/A
associate-*r/: N/A
metadata-eval: N/A
lower-/.f64: N/A
lower-fabs.f64: 50.5%
Applied rewrites: 50.5%
Taylor expanded in a around inf
Applied rewrites: 1.8%
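The constant -2400000.0 matches the cubic Maclaurin coefficient of the tanh term, consistent with the "Taylor expanded in a around 0" step (my derivation; the report does not spell this out):

\tanh x = x - \frac{x^{3}}{3} + O\left(x^{5}\right)
\quad\Longrightarrow\quad
0.9 \cdot \tanh\left(200 a\right) = 180\,a - \frac{0.9 \cdot 200^{3}}{3}\,a^{3} + O\left(a^{5}\right) = 180\,a - 2400000\,a^{3} + O\left(a^{5}\right)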
Reproduce:
herbie shell --seed 1
(FPCore (a)
:name "tanh(a * 200) * 0.9 + (a / abs(a)) * 0.1"
:precision binary64
:pre (and (<= -1.79e+308 a) (<= a 1.79e+308))
(+ (* (tanh (* a 200.0)) 0.9) (* (/ a (fabs a)) 0.1)))
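To reproduce, start the shell with the seed shown and paste the FPCore above at the prompt; Herbie prints its improved versions. If your Herbie build also provides the file-based improve subcommand (see the Herbie documentation; the file names here are placeholders), an equivalent batch invocation would be:

herbie improve --seed 1 sign.fpcore improved.fpcore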