(FPCore (x) :precision binary64 (- (tan (+ x (pow 10.0 (- 12.0)))) (tan x)))
/* Naive form: tan(x + 10^-12) - tan(x) evaluated directly; the two tan calls
 * are nearly equal for most x, so the subtraction cancels (report: 48.1% accuracy). */
double code(double x) { return tan((x + pow(10.0, -12.0))) - tan(x); }
real(8) function code(x) real(8), intent (in) :: x code = tan((x + (10.0d0 ** -12.0d0))) - tan(x) end function
// Naive difference tan(x + 10^-12) - tan(x), computed directly with Math.tan/Math.pow.
public static double code(double x) { return Math.tan((x + Math.pow(10.0, -12.0))) - Math.tan(x); }
def code(x):
    """Return tan(x + 10**-12) - tan(x), evaluated naively (reference form)."""
    eps = math.pow(10.0, -12.0)
    return math.tan(x + eps) - math.tan(x)
# Naive difference tan(x + 10^-12) - tan(x) in Float64.
function code(x) return Float64(tan(Float64(x + (10.0 ^ Float64(-12.0)))) - tan(x)) end
function tmp = code(x) tmp = tan((x + (10.0 ^ -12.0))) - tan(x); end
code[x_] := N[(N[Tan[N[(x + N[Power[10.0, (-12.0)], $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l} \\ \tan \left(x + {10}^{\left(-12\right)}\right) - \tan x \end{array}
Sampling outcomes in binary64 precision:
Herbie found 8 alternatives:
| Alternative | Accuracy | Speedup |
| --- | --- | --- |
(FPCore (x) :precision binary64 (- (tan (+ x (pow 10.0 (- 12.0)))) (tan x)))
double code(double x) { return tan((x + pow(10.0, -12.0))) - tan(x); }
real(8) function code(x) real(8), intent (in) :: x code = tan((x + (10.0d0 ** -12.0d0))) - tan(x) end function
public static double code(double x) { return Math.tan((x + Math.pow(10.0, -12.0))) - Math.tan(x); }
# Reference (naive) implementation: tan(x + 10^-12) - tan(x), computed directly.
def code(x): return math.tan((x + math.pow(10.0, -12.0))) - math.tan(x)
function code(x) return Float64(tan(Float64(x + (10.0 ^ Float64(-12.0)))) - tan(x)) end
function tmp = code(x) tmp = tan((x + (10.0 ^ -12.0))) - tan(x); end
code[x_] := N[(N[Tan[N[(x + N[Power[10.0, (-12.0)], $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l} \\ \tan \left(x + {10}^{\left(-12\right)}\right) - \tan x \end{array}
(FPCore (x) :precision binary64 (let* ((t_0 (/ (sin x) (cos x))) (t_1 (fma (/ (- (sin x)) (cos 1e-12)) (/ (sin 1e-12) (cos x)) 1.0))) (+ (- (/ t_0 t_1) t_0) (/ (/ (sin 1e-12) (cos 1e-12)) t_1))))
double code(double x) { double t_0 = sin(x) / cos(x); double t_1 = fma((-sin(x) / cos(1e-12)), (sin(1e-12) / cos(x)), 1.0); return ((t_0 / t_1) - t_0) + ((sin(1e-12) / cos(1e-12)) / t_1); }
function code(x) t_0 = Float64(sin(x) / cos(x)) t_1 = fma(Float64(Float64(-sin(x)) / cos(1e-12)), Float64(sin(1e-12) / cos(x)), 1.0) return Float64(Float64(Float64(t_0 / t_1) - t_0) + Float64(Float64(sin(1e-12) / cos(1e-12)) / t_1)) end
code[x_] := Block[{t$95$0 = N[(N[Sin[x], $MachinePrecision] / N[Cos[x], $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[((-N[Sin[x], $MachinePrecision]) / N[Cos[1e-12], $MachinePrecision]), $MachinePrecision] * N[(N[Sin[1e-12], $MachinePrecision] / N[Cos[x], $MachinePrecision]), $MachinePrecision] + 1.0), $MachinePrecision]}, N[(N[(N[(t$95$0 / t$95$1), $MachinePrecision] - t$95$0), $MachinePrecision] + N[(N[(N[Sin[1e-12], $MachinePrecision] / N[Cos[1e-12], $MachinePrecision]), $MachinePrecision] / t$95$1), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l} \\ \begin{array}{l} t_0 := \frac{\sin x}{\cos x}\\ t_1 := \mathsf{fma}\left(\frac{-\sin x}{\cos \left( 10^{-12} \right)}, \frac{\sin \left( 10^{-12} \right)}{\cos x}, 1\right)\\ \left(\frac{t\_0}{t\_1} - t\_0\right) + \frac{\frac{\sin \left( 10^{-12} \right)}{\cos \left( 10^{-12} \right)}}{t\_1} \end{array} \end{array}
Initial program 48.1%
lift-tan.f64
N/A
lift-+.f64
N/A
tan-sum
N/A
clear-num
N/A
inv-pow
N/A
div-inv
N/A
unpow-prod-down
N/A
inv-pow
N/A
lower-*.f64
N/A
Applied rewrites: 68.9%
Taylor expanded in x around inf
associate--l+
N/A
+-commutative
N/A
lower-+.f64
N/A
Applied rewrites: 69.9%
(FPCore (x) :precision binary64 (fma (/ (+ (tan 1e-12) (tan x)) (- 1.0 (pow (* (tan 1e-12) (tan x)) 2.0))) (fma (tan 1e-12) (tan x) 1.0) (- (tan x))))
double code(double x) { return fma(((tan(1e-12) + tan(x)) / (1.0 - pow((tan(1e-12) * tan(x)), 2.0))), fma(tan(1e-12), tan(x), 1.0), -tan(x)); }
function code(x) return fma(Float64(Float64(tan(1e-12) + tan(x)) / Float64(1.0 - (Float64(tan(1e-12) * tan(x)) ^ 2.0))), fma(tan(1e-12), tan(x), 1.0), Float64(-tan(x))) end
code[x_] := N[(N[(N[(N[Tan[1e-12], $MachinePrecision] + N[Tan[x], $MachinePrecision]), $MachinePrecision] / N[(1.0 - N[Power[N[(N[Tan[1e-12], $MachinePrecision] * N[Tan[x], $MachinePrecision]), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[Tan[1e-12], $MachinePrecision] * N[Tan[x], $MachinePrecision] + 1.0), $MachinePrecision] + (-N[Tan[x], $MachinePrecision])), $MachinePrecision]
\begin{array}{l} \\ \mathsf{fma}\left(\frac{\tan \left( 10^{-12} \right) + \tan x}{1 - {\left(\tan \left( 10^{-12} \right) \cdot \tan x\right)}^{2}}, \mathsf{fma}\left(\tan \left( 10^{-12} \right), \tan x, 1\right), -\tan x\right) \end{array}
Initial program 48.1%
lift--.f64
N/A
sub-neg
N/A
lift-tan.f64
N/A
lift-+.f64
N/A
tan-sum
N/A
flip--
N/A
associate-/r/
N/A
lower-fma.f64
N/A
Applied rewrites: 69.7%
(FPCore (x) :precision binary64 (- (* (/ (+ (tan 1e-12) (tan x)) (- 1.0 (pow (* (tan 1e-12) (tan x)) 2.0))) (fma (tan 1e-12) (tan x) 1.0)) (tan x)))
double code(double x) { return (((tan(1e-12) + tan(x)) / (1.0 - pow((tan(1e-12) * tan(x)), 2.0))) * fma(tan(1e-12), tan(x), 1.0)) - tan(x); }
function code(x) return Float64(Float64(Float64(Float64(tan(1e-12) + tan(x)) / Float64(1.0 - (Float64(tan(1e-12) * tan(x)) ^ 2.0))) * fma(tan(1e-12), tan(x), 1.0)) - tan(x)) end
code[x_] := N[(N[(N[(N[(N[Tan[1e-12], $MachinePrecision] + N[Tan[x], $MachinePrecision]), $MachinePrecision] / N[(1.0 - N[Power[N[(N[Tan[1e-12], $MachinePrecision] * N[Tan[x], $MachinePrecision]), $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[(N[Tan[1e-12], $MachinePrecision] * N[Tan[x], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l} \\ \frac{\tan \left( 10^{-12} \right) + \tan x}{1 - {\left(\tan \left( 10^{-12} \right) \cdot \tan x\right)}^{2}} \cdot \mathsf{fma}\left(\tan \left( 10^{-12} \right), \tan x, 1\right) - \tan x \end{array}
Initial program 48.1%
lift-tan.f64
N/A
lift-+.f64
N/A
tan-sum
N/A
flip--
N/A
associate-/r/
N/A
lower-*.f64
N/A
Applied rewrites: 69.3%
(FPCore (x) :precision binary64 (pow (pow (- (/ (+ (tan x) (tan 1e-12)) (fma (- (tan 1e-12)) (tan x) 1.0)) (tan x)) -1.0) -1.0))
double code(double x) { return pow(pow((((tan(x) + tan(1e-12)) / fma(-tan(1e-12), tan(x), 1.0)) - tan(x)), -1.0), -1.0); }
function code(x) return (Float64(Float64(Float64(tan(x) + tan(1e-12)) / fma(Float64(-tan(1e-12)), tan(x), 1.0)) - tan(x)) ^ -1.0) ^ -1.0 end
code[x_] := N[Power[N[Power[N[(N[(N[(N[Tan[x], $MachinePrecision] + N[Tan[1e-12], $MachinePrecision]), $MachinePrecision] / N[((-N[Tan[1e-12], $MachinePrecision]) * N[Tan[x], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision], -1.0], $MachinePrecision], -1.0], $MachinePrecision]
\begin{array}{l} \\ {\left({\left(\frac{\tan x + \tan \left( 10^{-12} \right)}{\mathsf{fma}\left(-\tan \left( 10^{-12} \right), \tan x, 1\right)} - \tan x\right)}^{-1}\right)}^{-1} \end{array}
Initial program 48.1%
lift-tan.f64
N/A
lift-+.f64
N/A
tan-sum
N/A
clear-num
N/A
inv-pow
N/A
div-inv
N/A
unpow-prod-down
N/A
inv-pow
N/A
lower-*.f64
N/A
Applied rewrites: 68.9%
Applied rewrites: 69.3%
Final simplification: 69.3%
(FPCore (x) :precision binary64 (- (/ (+ (tan 1e-12) (tan x)) (fma (- (tan 1e-12)) (tan x) 1.0)) (tan x)))
double code(double x) { return ((tan(1e-12) + tan(x)) / fma(-tan(1e-12), tan(x), 1.0)) - tan(x); }
function code(x) return Float64(Float64(Float64(tan(1e-12) + tan(x)) / fma(Float64(-tan(1e-12)), tan(x), 1.0)) - tan(x)) end
code[x_] := N[(N[(N[(N[Tan[1e-12], $MachinePrecision] + N[Tan[x], $MachinePrecision]), $MachinePrecision] / N[((-N[Tan[1e-12], $MachinePrecision]) * N[Tan[x], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l} \\ \frac{\tan \left( 10^{-12} \right) + \tan x}{\mathsf{fma}\left(-\tan \left( 10^{-12} \right), \tan x, 1\right)} - \tan x \end{array}
Initial program 48.1%
lift-tan.f64
N/A
lift-+.f64
N/A
tan-sum
N/A
lower-/.f64
N/A
lift-tan.f64
N/A
+-commutative
N/A
lower-+.f64
N/A
lower-tan.f64
N/A
lift-pow.f64
N/A
lift-neg.f64
N/A
metadata-eval
N/A
metadata-eval
N/A
sub-neg
N/A
+-commutative
N/A
lift-tan.f64
N/A
*-commutative
N/A
distribute-lft-neg-in
N/A
lower-fma.f64
N/A
Applied rewrites: 69.3%
(FPCore (x) :precision binary64 (if (<= (- (tan (+ x (pow 10.0 -12.0))) (tan x)) 0.0) (- (tan 1e-12) (tan x)) (- (tan (+ x 1e-12)) (tan x))))
/* Branching alternative: if the naive difference tan(x + 10^-12) - tan(x) is
 * non-positive (the step crossed a pole of tan), fall back to tan(1e-12) - tan(x);
 * otherwise use the naive form with the literal 1e-12. */
double code(double x) { double tmp; if ((tan((x + pow(10.0, -12.0))) - tan(x)) <= 0.0) { tmp = tan(1e-12) - tan(x); } else { tmp = tan((x + 1e-12)) - tan(x); } return tmp; }
real(8) function code(x) real(8), intent (in) :: x real(8) :: tmp if ((tan((x + (10.0d0 ** (-12.0d0)))) - tan(x)) <= 0.0d0) then tmp = tan(1d-12) - tan(x) else tmp = tan((x + 1d-12)) - tan(x) end if code = tmp end function
public static double code(double x) { double tmp; if ((Math.tan((x + Math.pow(10.0, -12.0))) - Math.tan(x)) <= 0.0) { tmp = Math.tan(1e-12) - Math.tan(x); } else { tmp = Math.tan((x + 1e-12)) - Math.tan(x); } return tmp; }
def code(x):
    """Piecewise alternative keyed on the sign of the naive difference.

    Non-positive naive difference -> tan(1e-12) - tan(x); otherwise the
    naive form tan(x + 1e-12) - tan(x).
    """
    naive = math.tan(x + math.pow(10.0, -12.0)) - math.tan(x)
    if naive <= 0.0:
        return math.tan(1e-12) - math.tan(x)
    return math.tan(x + 1e-12) - math.tan(x)
function code(x) tmp = 0.0 if (Float64(tan(Float64(x + (10.0 ^ -12.0))) - tan(x)) <= 0.0) tmp = Float64(tan(1e-12) - tan(x)); else tmp = Float64(tan(Float64(x + 1e-12)) - tan(x)); end return tmp end
function tmp_2 = code(x) tmp = 0.0; if ((tan((x + (10.0 ^ -12.0))) - tan(x)) <= 0.0) tmp = tan(1e-12) - tan(x); else tmp = tan((x + 1e-12)) - tan(x); end tmp_2 = tmp; end
code[x_] := If[LessEqual[N[(N[Tan[N[(x + N[Power[10.0, -12.0], $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision], 0.0], N[(N[Tan[1e-12], $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision], N[(N[Tan[N[(x + 1e-12), $MachinePrecision]], $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;\tan \left(x + {10}^{-12}\right) - \tan x \leq 0:\\ \;\;\;\;\tan \left( 10^{-12} \right) - \tan x\\ \mathbf{else}:\\ \;\;\;\;\tan \left(x + 10^{-12}\right) - \tan x\\ \end{array} \end{array}
if (-.f64 (tan.f64 (+.f64 x (pow.f64 #s(literal 10 binary64) (neg.f64 #s(literal 12 binary64))))) (tan.f64 x)) < 0.0
Initial program 3.2%
Taylor expanded in x around 0
Applied rewrites: 6.2%
if 0.0 < (-.f64 (tan.f64 (+.f64 x (pow.f64 #s(literal 10 binary64) (neg.f64 #s(literal 12 binary64))))) (tan.f64 x))
Initial program 97.4%
lift-pow.f64
N/A
lift-neg.f64
N/A
metadata-eval
N/A
metadata-eval
97.4
Applied rewrites: 97.4%
Final simplification: 49.7%
(FPCore (x) :precision binary64 (/ (sin 1e-12) (cos 1e-12)))
/* Constant alternative sin(1e-12)/cos(1e-12) (= tan(1e-12)); x is unused.
 * Comes from the report's Taylor expansion of the original around x = 0. */
double code(double x) { return sin(1e-12) / cos(1e-12); }
real(8) function code(x) real(8), intent (in) :: x code = sin(1d-12) / cos(1d-12) end function
public static double code(double x) { return Math.sin(1e-12) / Math.cos(1e-12); }
def code(x):
    """Constant approximation sin(1e-12)/cos(1e-12); the argument x is ignored."""
    t = 1e-12
    return math.sin(t) / math.cos(t)
function code(x) return Float64(sin(1e-12) / cos(1e-12)) end
function tmp = code(x) tmp = sin(1e-12) / cos(1e-12); end
code[x_] := N[(N[Sin[1e-12], $MachinePrecision] / N[Cos[1e-12], $MachinePrecision]), $MachinePrecision]
\begin{array}{l} \\ \frac{\sin \left( 10^{-12} \right)}{\cos \left( 10^{-12} \right)} \end{array}
Initial program 48.1%
Taylor expanded in x around 0
lower-/.f64
N/A
lower-sin.f64
N/A
lower-cos.f64
57.1
Applied rewrites: 57.1%
(FPCore (x) :precision binary64 (- (tan 1e-12) (tan x)))
/* Approximation tan(1e-12) - tan(x), from the report's Taylor expansion of the
 * original expression around x = 0. */
double code(double x) { return tan(1e-12) - tan(x); }
real(8) function code(x) real(8), intent (in) :: x code = tan(1d-12) - tan(x) end function
// Approximation tan(1e-12) - tan(x), from the report's Taylor expansion around x = 0.
public static double code(double x) { return Math.tan(1e-12) - Math.tan(x); }
def code(x):
    """Approximation tan(1e-12) - tan(x) (series expansion of the original at x = 0)."""
    base = math.tan(1e-12)
    return base - math.tan(x)
function code(x) return Float64(tan(1e-12) - tan(x)) end
function tmp = code(x) tmp = tan(1e-12) - tan(x); end
code[x_] := N[(N[Tan[1e-12], $MachinePrecision] - N[Tan[x], $MachinePrecision]), $MachinePrecision]
\begin{array}{l} \\ \tan \left( 10^{-12} \right) - \tan x \end{array}
Initial program 48.1%
Taylor expanded in x around 0
Applied rewrites: 46.6%
herbie shell --seed 1
; Input program given to Herbie: tan(x + 10^-12) - tan(x) in binary64.
(FPCore (x)
 :name "expr_51555138"
 :precision binary64
 (- (tan (+ x (pow 10.0 (- 12.0)))) (tan x)))