; Initial program: 1/(2*cosh(x) + 2), evaluated directly in binary64.
(FPCore (x) :precision binary64 (/ 1.0 (+ (* 2.0 (cosh x)) 2.0)))
double code(double x) { return 1.0 / ((2.0 * cosh(x)) + 2.0); }
! Fortran rendering: code = 1/(2*cosh(x) + 2) in double precision.
real(8) function code(x) real(8), intent (in) :: x code = 1.0d0 / ((2.0d0 * cosh(x)) + 2.0d0) end function
// Java rendering: 1/(2*Math.cosh(x) + 2) in binary64.
public static double code(double x) { return 1.0 / ((2.0 * Math.cosh(x)) + 2.0); }
def code(x):
    """Return 1/(2*cosh(x) + 2), evaluated directly in float arithmetic."""
    denominator = 2.0 * math.cosh(x) + 2.0
    return 1.0 / denominator
# Julia rendering: Float64(...) wrappers pin each intermediate to binary64.
function code(x) return Float64(1.0 / Float64(Float64(2.0 * cosh(x)) + 2.0)) end
% MATLAB rendering: direct evaluation of 1/(2*cosh(x) + 2).
function tmp = code(x) tmp = 1.0 / ((2.0 * cosh(x)) + 2.0); end
(* Mathematica rendering: N[..., $MachinePrecision] evaluates each step at machine precision. *)
code[x_] := N[(1.0 / N[(N[(2.0 * N[Cosh[x], $MachinePrecision]), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]
% LaTeX rendering of the initial program 1/(2*cosh(x) + 2).
\begin{array}{l} \\ \frac{1}{2 \cdot \cosh x + 2} \end{array}
Sampling outcomes in binary64 precision:
Herbie found 8 alternatives:
| Alternative | Accuracy | Speedup |
| --- | --- | --- |
; Alternative 1: identical to the initial program (accuracy 100%).
(FPCore (x) :precision binary64 (/ 1.0 (+ (* 2.0 (cosh x)) 2.0)))
/* C rendering: direct evaluation of 1/(2*cosh(x) + 2). */
double code(double x) { return 1.0 / ((2.0 * cosh(x)) + 2.0); }
! Fortran rendering of the same expression.
real(8) function code(x) real(8), intent (in) :: x code = 1.0d0 / ((2.0d0 * cosh(x)) + 2.0d0) end function
// Java rendering of the same expression.
public static double code(double x) { return 1.0 / ((2.0 * Math.cosh(x)) + 2.0); }
# Python rendering of the same expression.
def code(x): return 1.0 / ((2.0 * math.cosh(x)) + 2.0)
# Julia rendering; Float64() pins intermediates to binary64.
function code(x) return Float64(1.0 / Float64(Float64(2.0 * cosh(x)) + 2.0)) end
% MATLAB rendering of the same expression.
function tmp = code(x) tmp = 1.0 / ((2.0 * cosh(x)) + 2.0); end
(* Mathematica rendering at machine precision. *)
code[x_] := N[(1.0 / N[(N[(2.0 * N[Cosh[x], $MachinePrecision]), $MachinePrecision] + 2.0), $MachinePrecision]), $MachinePrecision]
% LaTeX rendering of the same expression.
\begin{array}{l} \\ \frac{1}{2 \cdot \cosh x + 2} \end{array}
; Alternative: cosh expanded into exponentials, (2 + e^-x + e^x)^-1.
(FPCore (x) :precision binary64 (pow (+ (+ 2.0 (exp (- x))) (exp x)) -1.0))
double code(double x) { return pow(((2.0 + exp(-x)) + exp(x)), -1.0); }
! Fortran: (2 + e^-x + e^x) ** (-1), cosh expanded into exponentials.
real(8) function code(x) real(8), intent (in) :: x code = ((2.0d0 + exp(-x)) + exp(x)) ** (-1.0d0) end function
// Java: same expression via Math.exp and Math.pow.
public static double code(double x) { return Math.pow(((2.0 + Math.exp(-x)) + Math.exp(x)), -1.0); }
def code(x):
    """Return (2 + e**-x + e**x) ** -1.0, i.e. 1/(2*cosh(x)+2) via explicit exponentials."""
    total = 2.0 + math.exp(-x)
    total = total + math.exp(x)
    return math.pow(total, -1.0)
# Julia rendering: Float64(...) pins intermediates to binary64; ^ -1.0 is the reciprocal.
function code(x) return Float64(Float64(2.0 + exp(Float64(-x))) + exp(x)) ^ -1.0 end
% MATLAB rendering of the same expression.
function tmp = code(x) tmp = ((2.0 + exp(-x)) + exp(x)) ^ -1.0; end
(* Mathematica rendering at machine precision. *)
code[x_] := N[Power[N[(N[(2.0 + N[Exp[(-x)], $MachinePrecision]), $MachinePrecision] + N[Exp[x], $MachinePrecision]), $MachinePrecision], -1.0], $MachinePrecision]
% LaTeX rendering of the same expression.
\begin{array}{l} \\ {\left(\left(2 + e^{-x}\right) + e^{x}\right)}^{-1} \end{array}
Initial program 100.0%
lift-+.f64
N/A
+-commutative
N/A
lift-*.f64
N/A
lift-cosh.f64
N/A
cosh-undef
N/A
+-commutative
N/A
associate-+r+
N/A
lower-+.f64
N/A
lower-+.f64
N/A
lower-exp.f64
N/A
lower-neg.f64
N/A
lower-exp.f64
100.0
Applied rewrites: 100.0%
Final simplification: 100.0%
; Branch-split alternative: for x <= 3.5, cosh(x) is replaced by its degree-6 Taylor
; polynomial about 0 (1 + x^2/2 + x^4/24 + x^6/720, fma-fused); for x > 3.5 the
; denominator collapses to 3 + e^x.
(FPCore (x) :precision binary64 (if (<= x 3.5) (pow (+ (* 2.0 (fma (fma (fma 0.001388888888888889 (* x x) 0.041666666666666664) (* x x) 0.5) (* x x) 1.0)) 2.0) -1.0) (pow (+ 3.0 (exp x)) -1.0)))
/*
 * 1/(2*cosh(x)+2), branch-split at x == 3.5:
 *  - x <= 3.5: cosh(x) approximated by its degree-6 Taylor polynomial about 0,
 *    1 + x^2/2 + x^4/24 + x^6/720 (coefficients 0.5, 0.041666..., 0.0013888...),
 *    accumulated with fma for accuracy; reciprocal via pow(., -1).
 *  - x > 3.5: denominator taken as 3 + e^x (per the trace below, the generator's
 *    Taylor step folded exp(-x) into the constant: its value at x = 0 is 1, so 2+1 = 3).
 * NOTE(review): the 3.5 split point comes from the report's sampling; confirm before reuse.
 */
double code(double x) { double tmp; if (x <= 3.5) { tmp = pow(((2.0 * fma(fma(fma(0.001388888888888889, (x * x), 0.041666666666666664), (x * x), 0.5), (x * x), 1.0)) + 2.0), -1.0); } else { tmp = pow((3.0 + exp(x)), -1.0); } return tmp; }
# Julia rendering of the branch-split variant (Taylor polynomial below 3.5, 3 + e^x above).
function code(x) tmp = 0.0 if (x <= 3.5) tmp = Float64(Float64(2.0 * fma(fma(fma(0.001388888888888889, Float64(x * x), 0.041666666666666664), Float64(x * x), 0.5), Float64(x * x), 1.0)) + 2.0) ^ -1.0; else tmp = Float64(3.0 + exp(x)) ^ -1.0; end return tmp end
(* Mathematica rendering of the same branch-split variant. *)
code[x_] := If[LessEqual[x, 3.5], N[Power[N[(N[(2.0 * N[(N[(N[(0.001388888888888889 * N[(x * x), $MachinePrecision] + 0.041666666666666664), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.5), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] + 2.0), $MachinePrecision], -1.0], $MachinePrecision], N[Power[N[(3.0 + N[Exp[x], $MachinePrecision]), $MachinePrecision], -1.0], $MachinePrecision]]
% LaTeX rendering of the same branch-split variant.
\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;x \leq 3.5:\\ \;\;\;\;{\left(2 \cdot \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.001388888888888889, x \cdot x, 0.041666666666666664\right), x \cdot x, 0.5\right), x \cdot x, 1\right) + 2\right)}^{-1}\\ \mathbf{else}:\\ \;\;\;\;{\left(3 + e^{x}\right)}^{-1}\\ \end{array} \end{array}
if x <= 3.5
Initial program 100.0%
Taylor expanded in x around 0
+-commutative
N/A
*-commutative
N/A
lower-fma.f64
N/A
+-commutative
N/A
*-commutative
N/A
lower-fma.f64
N/A
+-commutative
N/A
lower-fma.f64
N/A
unpow2
N/A
lower-*.f64
N/A
unpow2
N/A
lower-*.f64
N/A
unpow2
N/A
lower-*.f64
96.8
Applied rewrites: 96.8%
if 3.5 < x
Initial program 100.0%
lift-+.f64
N/A
+-commutative
N/A
lift-*.f64
N/A
lift-cosh.f64
N/A
cosh-undef
N/A
+-commutative
N/A
associate-+r+
N/A
lower-+.f64
N/A
lower-+.f64
N/A
lower-exp.f64
N/A
lower-neg.f64
N/A
lower-exp.f64
100.0
Applied rewrites: 100.0%
Taylor expanded in x around 0
Applied rewrites: 100.0%
Final simplification: 97.7%
; Alternative: denominator fused into a single fma, (fma(cosh x, 2, 2))^-1.
(FPCore (x) :precision binary64 (pow (fma (cosh x) 2.0 2.0) -1.0))
double code(double x) { return pow(fma(cosh(x), 2.0, 2.0), -1.0); }
# Julia rendering: denominator as a single fma.
function code(x) return fma(cosh(x), 2.0, 2.0) ^ -1.0 end
(* Mathematica rendering (fma spelled as multiply-add at machine precision). *)
code[x_] := N[Power[N[(N[Cosh[x], $MachinePrecision] * 2.0 + 2.0), $MachinePrecision], -1.0], $MachinePrecision]
% LaTeX rendering of the same fma variant.
\begin{array}{l} \\ {\left(\mathsf{fma}\left(\cosh x, 2, 2\right)\right)}^{-1} \end{array}
Initial program 100.0%
lift-+.f64
N/A
lift-*.f64
N/A
*-commutative
N/A
lower-fma.f64
100.0
Applied rewrites: 100.0%
Final simplification: 100.0%
; Alternative: (2*P(x) + 2)^-1 with P the degree-6 cosh Taylor polynomial about 0.
(FPCore (x) :precision binary64 (pow (+ (* 2.0 (fma (fma (fma 0.001388888888888889 (* x x) 0.041666666666666664) (* x x) 0.5) (* x x) 1.0)) 2.0) -1.0))
/* 1/(2*P(x)+2) with P(x) = 1 + x^2/2 + x^4/24 + x^6/720, the degree-6 Taylor
 * polynomial of cosh about 0 (coefficients 0.5, 0.041666..., 0.0013888...),
 * evaluated Horner-style with nested fma. Small-|x| approximation; the report
 * scores this variant at 92.9% accuracy. */
double code(double x) { return pow(((2.0 * fma(fma(fma(0.001388888888888889, (x * x), 0.041666666666666664), (x * x), 0.5), (x * x), 1.0)) + 2.0), -1.0); }
# Julia rendering of the degree-6 Taylor variant.
function code(x) return Float64(Float64(2.0 * fma(fma(fma(0.001388888888888889, Float64(x * x), 0.041666666666666664), Float64(x * x), 0.5), Float64(x * x), 1.0)) + 2.0) ^ -1.0 end
(* Mathematica rendering of the same variant. *)
code[x_] := N[Power[N[(N[(2.0 * N[(N[(N[(0.001388888888888889 * N[(x * x), $MachinePrecision] + 0.041666666666666664), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.5), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] + 2.0), $MachinePrecision], -1.0], $MachinePrecision]
% LaTeX rendering of the same variant.
\begin{array}{l} \\ {\left(2 \cdot \mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.001388888888888889, x \cdot x, 0.041666666666666664\right), x \cdot x, 0.5\right), x \cdot x, 1\right) + 2\right)}^{-1} \end{array}
Initial program 100.0%
Taylor expanded in x around 0
+-commutative
N/A
*-commutative
N/A
lower-fma.f64
N/A
+-commutative
N/A
*-commutative
N/A
lower-fma.f64
N/A
+-commutative
N/A
lower-fma.f64
N/A
unpow2
N/A
lower-*.f64
N/A
unpow2
N/A
lower-*.f64
N/A
unpow2
N/A
lower-*.f64
92.9
Applied rewrites: 92.9%
Final simplification: 92.9%
; Alternative: degree-4 Taylor polynomial of cosh, with the *2 and +2 folded into the outer fma.
(FPCore (x) :precision binary64 (pow (fma (fma (fma 0.041666666666666664 (* x x) 0.5) (* x x) 1.0) 2.0 2.0) -1.0))
double code(double x) { return pow(fma(fma(fma(0.041666666666666664, (x * x), 0.5), (x * x), 1.0), 2.0, 2.0), -1.0); }
# Julia rendering of the degree-4 Taylor variant.
function code(x) return fma(fma(fma(0.041666666666666664, Float64(x * x), 0.5), Float64(x * x), 1.0), 2.0, 2.0) ^ -1.0 end
(* Mathematica rendering of the same variant. *)
code[x_] := N[Power[N[(N[(N[(0.041666666666666664 * N[(x * x), $MachinePrecision] + 0.5), $MachinePrecision] * N[(x * x), $MachinePrecision] + 1.0), $MachinePrecision] * 2.0 + 2.0), $MachinePrecision], -1.0], $MachinePrecision]
% LaTeX rendering of the same variant.
\begin{array}{l} \\ {\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.041666666666666664, x \cdot x, 0.5\right), x \cdot x, 1\right), 2, 2\right)\right)}^{-1} \end{array}
Initial program 100.0%
Taylor expanded in x around 0
+-commutative
N/A
*-commutative
N/A
lower-fma.f64
N/A
+-commutative
N/A
lower-fma.f64
N/A
unpow2
N/A
lower-*.f64
N/A
unpow2
N/A
lower-*.f64
88.8
Applied rewrites: 88.8%
lift-+.f64
N/A
lift-*.f64
N/A
*-commutative
N/A
lower-fma.f64
88.8
Applied rewrites: 88.8%
Final simplification: 88.8%
; Alternative: quadratic approximation, (fma(x, x, 2) + 2)^-1 = 1/(x^2 + 4).
(FPCore (x) :precision binary64 (pow (+ (fma x x 2.0) 2.0) -1.0))
double code(double x) { return pow((fma(x, x, 2.0) + 2.0), -1.0); }
# Julia rendering of the quadratic variant.
function code(x) return Float64(fma(x, x, 2.0) + 2.0) ^ -1.0 end
(* Mathematica rendering of the same variant. *)
code[x_] := N[Power[N[(N[(x * x + 2.0), $MachinePrecision] + 2.0), $MachinePrecision], -1.0], $MachinePrecision]
% LaTeX rendering of the same variant.
\begin{array}{l} \\ {\left(\mathsf{fma}\left(x, x, 2\right) + 2\right)}^{-1} \end{array}
Initial program 100.0%
Taylor expanded in x around 0
+-commutative
N/A
unpow2
N/A
lower-fma.f64
75.4
Applied rewrites: 75.4%
Final simplification: 75.4%
; Alternative: quadratic approximation with the constant fully folded, (x^2 + 4)^-1.
(FPCore (x) :precision binary64 (pow (fma x x 4.0) -1.0))
double code(double x) { return pow(fma(x, x, 4.0), -1.0); }
# Julia rendering of the folded quadratic variant.
function code(x) return fma(x, x, 4.0) ^ -1.0 end
(* Mathematica rendering of the same variant. *)
code[x_] := N[Power[N[(x * x + 4.0), $MachinePrecision], -1.0], $MachinePrecision]
% LaTeX rendering of the same variant.
\begin{array}{l} \\ {\left(\mathsf{fma}\left(x, x, 4\right)\right)}^{-1} \end{array}
Initial program 100.0%
Taylor expanded in x around 0
+-commutative
N/A
unpow2
N/A
lower-fma.f64
75.4
Applied rewrites: 75.4%
Final simplification: 75.4%
; Alternative: constant 0.25 = 1/(2*cosh(0)+2), the expression's value at x = 0.
(FPCore (x) :precision binary64 0.25)
/* Constant fallback: 0.25 = 1/(2*cosh(0)+2); since cosh(x) >= 1, this is the
 * function's maximum value. Ignores x entirely. */
double code(double x) { return 0.25; }
! Fortran rendering of the constant variant.
real(8) function code(x) real(8), intent (in) :: x code = 0.25d0 end function
// Java rendering of the constant variant.
public static double code(double x) { return 0.25; }
def code(x):
    """Constant approximation: always 0.25 (== 1/(2*cosh(0)+2)); x is ignored."""
    return 0.25
# Julia rendering of the constant variant.
function code(x) return 0.25 end
% MATLAB rendering of the constant variant.
function tmp = code(x) tmp = 0.25; end
(* Mathematica rendering of the constant variant. *)
code[x_] := 0.25
% LaTeX rendering of the constant variant.
\begin{array}{l} \\ 0.25 \end{array}
Initial program 100.0%
Taylor expanded in x around 0
Applied rewrites: 50.3%
herbie shell --seed 1
; Input FPCore passed to the shell invocation above: named expression
; 1/(2*cosh(x)+2) with a finite-binary64 precondition on x.
(FPCore (x)
:name "1 / (2 * cosh(x) + 2)"
:precision binary64
:pre (and (<= -1.79e+308 x) (<= x 1.79e+308))
(/ 1.0 (+ (* 2.0 (cosh x)) 2.0)))