(FPCore (x y) :precision binary64 (- (atan x) (atan y)))
double code(double x, double y) { return atan(x) - atan(y); }
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = atan(x) - atan(y)
end function
public static double code(double x, double y) { return Math.atan(x) - Math.atan(y); }
def code(x, y): return math.atan(x) - math.atan(y)
function code(x, y)
    return Float64(atan(x) - atan(y))
end
function tmp = code(x, y)
    tmp = atan(x) - atan(y);
end
code[x_, y_] := N[(N[ArcTan[x], $MachinePrecision] - N[ArcTan[y], $MachinePrecision]), $MachinePrecision]
\tan^{-1} x - \tan^{-1} y
Sampling outcomes in binary64 precision:
Herbie found 2 alternatives:
Alternative | Accuracy | Speedup
---|---|---
Initial program:
(FPCore (x y) :precision binary64 (- (atan x) (atan y)))
double code(double x, double y) { return atan(x) - atan(y); }
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = atan(x) - atan(y)
end function
public static double code(double x, double y) { return Math.atan(x) - Math.atan(y); }
def code(x, y): return math.atan(x) - math.atan(y)
function code(x, y)
    return Float64(atan(x) - atan(y))
end
function tmp = code(x, y)
    tmp = atan(x) - atan(y);
end
code[x_, y_] := N[(N[ArcTan[x], $MachinePrecision] - N[ArcTan[y], $MachinePrecision]), $MachinePrecision]
\tan^{-1} x - \tan^{-1} y
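Not part of the report, but a minimal C sketch of why the rewrite matters (the helper names naive/rewritten and the test inputs are illustrative, not Herbie output): when x and y are large and nearly equal, atan(x) and atan(y) both approach pi/2 and their difference cancels almost every significant bit, while the atan2/fma form of Alternative 1 below subtracts x and y directly.

#include <math.h>
#include <stdio.h>

/* Initial program: subtracts two nearly equal arctangents. */
static double naive(double x, double y) { return atan(x) - atan(y); }

/* Alternative 1 from the report: subtract before applying the arctangent. */
static double rewritten(double x, double y) { return atan2(x - y, fma(x, y, 1.0)); }

int main(void) {
    double x = 1.0e9;        /* top of the sampled input range */
    double y = 1.0e9 - 1.0;  /* nearly equal to x */
    /* Both atan(x) and atan(y) are within about 1e-9 of pi/2, so the naive
       difference is smaller than the spacing between doubles near pi/2 and
       comes out as rounding noise; the rewritten form computes x - y exactly
       and returns roughly 1e-18. */
    printf("naive     = %.17g\n", naive(x, y));
    printf("rewritten = %.17g\n", rewritten(x, y));
    return 0;
}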
Alternative 1:
(FPCore (x y) :precision binary64 (atan2 (- x y) (fma x y 1.0)))
double code(double x, double y) { return atan2((x - y), fma(x, y, 1.0)); }
function code(x, y)
    return atan(Float64(x - y), fma(x, y, 1.0))
end
code[x_, y_] := N[ArcTan[N[(x - y), $MachinePrecision] / N[(x * y + 1.0), $MachinePrecision]], $MachinePrecision]
\tan^{-1}_* \frac{x - y}{\mathsf{fma}\left(x, y, 1\right)}
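A sketch of the mathematics behind this alternative (my gloss, not report output): the diff-atan step in the derivation below matches the standard arctangent subtraction identity, which is valid here because the precondition keeps x, y >= 0 and therefore x y > -1:

\tan^{-1} x - \tan^{-1} y = \tan^{-1} \frac{x - y}{1 + x y} \qquad (x y > -1)

Evaluating the right-hand side as \tan^{-1}_* (that is, atan2) applied to the pair (x - y, fma(x, y, 1)) keeps the numerator and denominator separate and computes x*y + 1 with a single rounding.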
Initial program    100.0%
lift--.f64         N/A
lift-atan.f64      N/A
lift-atan.f64      N/A
diff-atan          N/A
lower-atan2.f64    N/A
lower--.f64        N/A
+-commutative      N/A
lower-fma.f64      100.0
Applied rewrites   100.0%
Alternative 2:
(FPCore (x y) :precision binary64 (atan2 (- x y) 1.0))
double code(double x, double y) { return atan2((x - y), 1.0); }
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = atan2((x - y), 1.0d0)
end function
public static double code(double x, double y) { return Math.atan2((x - y), 1.0); }
def code(x, y): return math.atan2((x - y), 1.0)
function code(x, y)
    return atan(Float64(x - y), 1.0)
end
function tmp = code(x, y)
    tmp = atan2((x - y), 1.0);
end
code[x_, y_] := N[ArcTan[N[(x - y), $MachinePrecision] / 1.0], $MachinePrecision]
\tan^{-1}_* \frac{x - y}{1}
Initial program    100.0%
lift--.f64         N/A
lift-atan.f64      N/A
lift-atan.f64      N/A
diff-atan          N/A
lower-atan2.f64    N/A
lower--.f64        N/A
+-commutative      N/A
lower-fma.f64      100.0
Applied rewrites   100.0%
Taylor expanded in x around 0
Applied rewrites   99.6%
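A plausible reading of the Taylor step above (again my gloss, not report output): expanding in x around 0 drops the x*y term from the denominator, collapsing the atan2 to a plain arctangent of the difference,

\tan^{-1} x - \tan^{-1} y = \tan^{-1} \frac{x - y}{1 + x y} \approx \tan^{-1} (x - y) \qquad \text{when } |x y| \ll 1

which is consistent with the small accuracy drop to 99.6%: Alternative 2 avoids the fma and the multiply, but it is only a good approximation where x*y is small.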
Reproduce:
herbie shell --seed 5
(FPCore (x y)
:name "atan(x) - atan(y)"
:precision binary64
:pre (and (and (<= 0.0 x) (<= x 1000000000.0)) (and (<= 0.0 y) (<= y 1000000000.0)))
(- (atan x) (atan y)))