;; Initial program: (1 - cos x) + 1/x - 1/tan x, evaluated in binary64.
(FPCore (x) :precision binary64 (- (+ (- 1.0 (cos x)) (/ 1.0 x)) (/ 1.0 (tan x))))
double code(double x) { return ((1.0 - cos(x)) + (1.0 / x)) - (1.0 / tan(x)); }
! Baseline translation: (1 - cos(x)) + 1/x - 1/tan(x) in real(8).
! NOTE(review): the report generator emitted this on a single line; free-form
! Fortran needs statement separators (;) or line breaks before it compiles.
real(8) function code(x) real(8), intent (in) :: x code = ((1.0d0 - cos(x)) + (1.0d0 / x)) - (1.0d0 / tan(x)) end function
/**
 * Baseline translation of (1 - cos(x)) + 1/x - 1/tan(x) in double precision.
 * Association order matches the FPCore spec: (oneMinusCos + invX) - invTan.
 *
 * @param x input value (radians)
 * @return (1 - cos(x)) + 1/x - 1/tan(x)
 */
public static double code(double x) {
    double oneMinusCos = 1.0 - Math.cos(x);
    double invX = 1.0 / x;
    double invTan = 1.0 / Math.tan(x);
    return (oneMinusCos + invX) - invTan;
}
def code(x): return ((1.0 - math.cos(x)) + (1.0 / x)) - (1.0 / math.tan(x))
# Baseline translation: (1 - cos x) + 1/x - 1/tan x.
# Each Float64(...) wrap from the generated code is preserved so every
# intermediate rounds exactly as in the original.
function code(x)
    one_minus_cos = Float64(1.0 - cos(x))
    partial_sum = Float64(one_minus_cos + Float64(1.0 / x))
    return Float64(partial_sum - Float64(1.0 / tan(x)))
end
% Baseline translation: (1 - cos(x)) + 1/x - 1/tan(x) in double precision.
% Association order matches the FPCore spec shown above this listing.
function tmp = code(x) tmp = ((1.0 - cos(x)) + (1.0 / x)) - (1.0 / tan(x)); end
(* Baseline translation of (1 - cos x) + 1/x - 1/tan x; each intermediate is
   explicitly rounded to $MachinePrecision to mirror binary64 evaluation. *)
code[x_] := N[(N[(N[(1.0 - N[Cos[x], $MachinePrecision]), $MachinePrecision] + N[(1.0 / x), $MachinePrecision]), $MachinePrecision] - N[(1.0 / N[Tan[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l} \\ \left(\left(1 - \cos x\right) + \frac{1}{x}\right) - \frac{1}{\tan x} \end{array}
Sampling outcomes in binary64 precision:
Herbie found 4 alternatives:
Alternative | Accuracy | Speedup |
---|---|---|
;; Alternative listing: restates the initial program unchanged (the report
;; repeats it as the comparison baseline).
(FPCore (x) :precision binary64 (- (+ (- 1.0 (cos x)) (/ 1.0 x)) (/ 1.0 (tan x))))
/* Restates the initial program unchanged (comparison baseline in the report). */
double code(double x) { return ((1.0 - cos(x)) + (1.0 / x)) - (1.0 / tan(x)); }
! Restates the initial program unchanged (comparison baseline in the report).
! NOTE(review): one-line layout needs statement separators to compile.
real(8) function code(x) real(8), intent (in) :: x code = ((1.0d0 - cos(x)) + (1.0d0 / x)) - (1.0d0 / tan(x)) end function
// Restates the initial program unchanged (comparison baseline in the report).
public static double code(double x) { return ((1.0 - Math.cos(x)) + (1.0 / x)) - (1.0 / Math.tan(x)); }
# Restates the initial program unchanged (comparison baseline in the report).
def code(x): return ((1.0 - math.cos(x)) + (1.0 / x)) - (1.0 / math.tan(x))
# Restates the initial program unchanged (comparison baseline in the report).
function code(x) return Float64(Float64(Float64(1.0 - cos(x)) + Float64(1.0 / x)) - Float64(1.0 / tan(x))) end
% Restates the initial program unchanged (comparison baseline in the report).
function tmp = code(x) tmp = ((1.0 - cos(x)) + (1.0 / x)) - (1.0 / tan(x)); end
(* Restates the initial program unchanged (comparison baseline in the report). *)
code[x_] := N[(N[(N[(1.0 - N[Cos[x], $MachinePrecision]), $MachinePrecision] + N[(1.0 / x), $MachinePrecision]), $MachinePrecision] - N[(1.0 / N[Tan[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l} \\ \left(\left(1 - \cos x\right) + \frac{1}{x}\right) - \frac{1}{\tan x} \end{array}
;; Alternative: rational approximation (coefficients 1/8, 1/27, 1/4, 1/9, 1/6),
;; derived per the log below by Taylor expansion in x around 0, using fma.
(FPCore (x) :precision binary64 (/ (* (fma 0.125 (pow x 3.0) 0.037037037037037035) x) (fma 0.25 (* x x) (- 0.1111111111111111 (* x 0.16666666666666666)))))
double code(double x) { return (fma(0.125, pow(x, 3.0), 0.037037037037037035) * x) / fma(0.25, (x * x), (0.1111111111111111 - (x * 0.16666666666666666))); }
# Rational approximation near 0 using fma; every Float64(...) wrap from the
# generated code is kept so intermediate rounding is unchanged.
function code(x)
    numerator = Float64(fma(0.125, (x ^ 3.0), 0.037037037037037035) * x)
    denominator = fma(0.25, Float64(x * x), Float64(0.1111111111111111 - Float64(x * 0.16666666666666666)))
    return Float64(numerator / denominator)
end
(* Rational approximation near 0 (coefficients 1/8, 1/27, 1/4, 1/9, 1/6);
   intermediates rounded to $MachinePrecision to mirror binary64. *)
code[x_] := N[(N[(N[(0.125 * N[Power[x, 3.0], $MachinePrecision] + 0.037037037037037035), $MachinePrecision] * x), $MachinePrecision] / N[(0.25 * N[(x * x), $MachinePrecision] + N[(0.1111111111111111 - N[(x * 0.16666666666666666), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l} \\ \frac{\mathsf{fma}\left(0.125, {x}^{3}, 0.037037037037037035\right) \cdot x}{\mathsf{fma}\left(0.25, x \cdot x, 0.1111111111111111 - x \cdot 0.16666666666666666\right)} \end{array}
Initial program 5.7%
Taylor expanded in x around 0
*-commutative
N/A
lower-*.f64
N/A
+-commutative
N/A
lower-fma.f64
99.5
Applied rewrites 99.5%
Applied rewrites 99.6%
;; Alternative: lower-order rational approximation (coefficients 1/4, 1/9,
;; 1/2, 1/3), fma-based, from Taylor expansion in x around 0 per the log below.
(FPCore (x) :precision binary64 (/ (* (fma 0.25 (* x x) -0.1111111111111111) x) (fma 0.5 x -0.3333333333333333)))
double code(double x) { return (fma(0.25, (x * x), -0.1111111111111111) * x) / fma(0.5, x, -0.3333333333333333); }
# Lower-order rational approximation near 0 using fma; Float64(...) rounding
# wraps from the generated code are preserved exactly.
function code(x)
    numerator = Float64(fma(0.25, Float64(x * x), -0.1111111111111111) * x)
    denominator = fma(0.5, x, -0.3333333333333333)
    return Float64(numerator / denominator)
end
(* Lower-order rational approximation near 0 (coefficients 1/4, 1/9, 1/2, 1/3);
   intermediates rounded to $MachinePrecision. *)
code[x_] := N[(N[(N[(0.25 * N[(x * x), $MachinePrecision] + -0.1111111111111111), $MachinePrecision] * x), $MachinePrecision] / N[(0.5 * x + -0.3333333333333333), $MachinePrecision]), $MachinePrecision]
\begin{array}{l} \\ \frac{\mathsf{fma}\left(0.25, x \cdot x, -0.1111111111111111\right) \cdot x}{\mathsf{fma}\left(0.5, x, -0.3333333333333333\right)} \end{array}
Initial program 5.7%
Taylor expanded in x around 0
*-commutative
N/A
lower-*.f64
N/A
+-commutative
N/A
lower-fma.f64
99.5
Applied rewrites 99.5%
Applied rewrites 99.6%
;; Alternative: quadratic approximation x * (x/2 + 1/3) via a single fma.
(FPCore (x) :precision binary64 (* (fma 0.5 x 0.3333333333333333) x))
double code(double x) { return fma(0.5, x, 0.3333333333333333) * x; }
# Quadratic approximation x * (x/2 + 1/3) via one fma; the single Float64(...)
# rounding wrap from the generated code is preserved.
function code(x)
    linear_part = fma(0.5, x, 0.3333333333333333)
    return Float64(linear_part * x)
end
(* Quadratic approximation x * (x/2 + 1/3); rounded to $MachinePrecision. *)
code[x_] := N[(N[(0.5 * x + 0.3333333333333333), $MachinePrecision] * x), $MachinePrecision]
\begin{array}{l} \\ \mathsf{fma}\left(0.5, x, 0.3333333333333333\right) \cdot x \end{array}
Initial program 5.7%
Taylor expanded in x around 0
*-commutative
N/A
lower-*.f64
N/A
+-commutative
N/A
lower-fma.f64
99.5
Applied rewrites 99.5%
;; Alternative: linear approximation x/3 (first-order Taylor term).
(FPCore (x) :precision binary64 (* 0.3333333333333333 x))
/* Linear approximation x/3. The named constant multiplies in the same
 * order as the generated one-liner, so results are bit-identical. */
double code(double x) {
    const double ONE_THIRD = 0.3333333333333333;
    return ONE_THIRD * x;
}
! Linear approximation x/3 in real(8).
! NOTE(review): one-line layout needs statement separators to compile.
real(8) function code(x) real(8), intent (in) :: x code = 0.3333333333333333d0 * x end function
/**
 * Linear approximation x/3.
 *
 * @param x input value
 * @return 0.3333333333333333 * x
 */
public static double code(double x) {
    final double ONE_THIRD = 0.3333333333333333;
    return ONE_THIRD * x;
}
def code(x):
    """Linear approximation x/3 (float64 constant, as generated)."""
    one_third = 0.3333333333333333
    return one_third * x
# Linear approximation x/3; the Float64(...) rounding wrap is preserved.
function code(x)
    one_third = 0.3333333333333333
    return Float64(one_third * x)
end
% Linear approximation x/3 (double-precision constant, as generated).
function tmp = code(x) tmp = 0.3333333333333333 * x; end
(* Linear approximation x/3; rounded to $MachinePrecision. *)
code[x_] := N[(0.3333333333333333 * x), $MachinePrecision]
\begin{array}{l} \\ 0.3333333333333333 \cdot x \end{array}
Initial program 5.7%
Taylor expanded in x around 0
lower-*.f64
99.0
Applied rewrites 99.0%
herbie shell --seed 1
;; Original FPCore input to Herbie: (1 - cos x) + 1/x - 1/tan x in binary64,
;; with the sampling precondition restricting x to [-1e-9, 0].
(FPCore (x)
:name "1-cos(x) + 1/x-1/tan(x)"
:precision binary64
:pre (and (<= -1e-9 x) (<= x 0.0))
(- (+ (- 1.0 (cos x)) (/ 1.0 x)) (/ 1.0 (tan x))))