(FPCore (x) :precision binary64 (+ (- x) (sqrt (- (* x x) 0.001))))
double code(double x) { return -x + sqrt(((x * x) - 0.001)); }
! Initial program: -x + sqrt(x*x - 0.001) in double precision (Herbie-generated).
real(8) function code(x) real(8), intent (in) :: x code = -x + sqrt(((x * x) - 0.001d0)) end function
// Initial program: -x + Math.sqrt(x*x - 0.001) in binary64 (Herbie-generated).
public static double code(double x) { return -x + Math.sqrt(((x * x) - 0.001)); }
def code(x):
    """Initial program: -x + sqrt(x*x - 0.001), evaluated in binary64."""
    radicand = x * x - 0.001
    return math.sqrt(radicand) - x
function code(x) return Float64(Float64(-x) + sqrt(Float64(Float64(x * x) - 0.001))) end
function tmp = code(x) tmp = -x + sqrt(((x * x) - 0.001)); end
code[x_] := N[((-x) + N[Sqrt[N[(N[(x * x), $MachinePrecision] - 0.001), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l} \\ \left(-x\right) + \sqrt{x \cdot x - 0.001} \end{array}
Sampling outcomes in binary64 precision:
Herbie found 8 alternatives:
Alternative | Accuracy | Speedup |
---|---|---|
(FPCore (x) :precision binary64 (+ (- x) (sqrt (- (* x x) 0.001))))
/* Alternative 1 (= initial program): -x + sqrt(x*x - 0.001) in binary64. */
double code(double x) { return -x + sqrt(((x * x) - 0.001)); }
! Alternative 1 (= initial program): -x + sqrt(x*x - 0.001) in double precision.
real(8) function code(x) real(8), intent (in) :: x code = -x + sqrt(((x * x) - 0.001d0)) end function
// Alternative 1 (= initial program): -x + Math.sqrt(x*x - 0.001) in binary64.
public static double code(double x) { return -x + Math.sqrt(((x * x) - 0.001)); }
def code(x):
    """Alternative 1 (same as the initial program): -x + sqrt(x*x - 0.001)."""
    inner = x * x - 0.001
    root = math.sqrt(inner)
    return root - x
function code(x) return Float64(Float64(-x) + sqrt(Float64(Float64(x * x) - 0.001))) end
function tmp = code(x) tmp = -x + sqrt(((x * x) - 0.001)); end
code[x_] := N[((-x) + N[Sqrt[N[(N[(x * x), $MachinePrecision] - 0.001), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l} \\ \left(-x\right) + \sqrt{x \cdot x - 0.001} \end{array}
(FPCore (x) :precision binary64 (* (fma x (- x (sqrt (fma x x -0.001))) (fma x x -0.001)) (/ (- (- -0.0005 (/ 3.75e-7 (* x x))) (/ (+ (/ 5.4687500000000007e-14 (* x x)) 1.8750000000000002e-10) (pow x 4.0))) (pow x 3.0))))
/* Herbie alternative: series-corrected rewriting of -x + sqrt(x*x - 0.001),
   derived by Taylor expansion in x around inf; uses fma for the radicand
   x*x - 0.001 to avoid rounding in the cancellation-prone difference.
   NOTE(review): fma ordering here is precision-critical — do not refactor. */
double code(double x) { return fma(x, (x - sqrt(fma(x, x, -0.001))), fma(x, x, -0.001)) * (((-0.0005 - (3.75e-7 / (x * x))) - (((5.4687500000000007e-14 / (x * x)) + 1.8750000000000002e-10) / pow(x, 4.0))) / pow(x, 3.0)); }
function code(x) return Float64(fma(x, Float64(x - sqrt(fma(x, x, -0.001))), fma(x, x, -0.001)) * Float64(Float64(Float64(-0.0005 - Float64(3.75e-7 / Float64(x * x))) - Float64(Float64(Float64(5.4687500000000007e-14 / Float64(x * x)) + 1.8750000000000002e-10) / (x ^ 4.0))) / (x ^ 3.0))) end
code[x_] := N[(N[(x * N[(x - N[Sqrt[N[(x * x + -0.001), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + N[(x * x + -0.001), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(-0.0005 - N[(3.75e-7 / N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(N[(5.4687500000000007e-14 / N[(x * x), $MachinePrecision]), $MachinePrecision] + 1.8750000000000002e-10), $MachinePrecision] / N[Power[x, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Power[x, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l} \\ \mathsf{fma}\left(x, x - \sqrt{\mathsf{fma}\left(x, x, -0.001\right)}, \mathsf{fma}\left(x, x, -0.001\right)\right) \cdot \frac{\left(-0.0005 - \frac{3.75 \cdot 10^{-7}}{x \cdot x}\right) - \frac{\frac{5.4687500000000007 \cdot 10^{-14}}{x \cdot x} + 1.8750000000000002 \cdot 10^{-10}}{{x}^{4}}}{{x}^{3}} \end{array}
Initial program 84.6%
Applied rewrites 84.7%
Taylor expanded in x around inf
lower-/.f64
N/A
Applied rewrites 95.3%
(FPCore (x) :precision binary64 (/ (fma (fma (fma -0.0005 (* x x) -1.25e-7) (* x x) -6.25e-11) (* x x) -3.9062500000000004e-14) (pow x 7.0)))
/* Herbie alternative: Taylor expansion (around inf, then 0) of
   -x + sqrt(x*x - 0.001), evaluated as a nested-fma (Horner) polynomial
   in x*x divided by x^7. */
double code(double x) { return fma(fma(fma(-0.0005, (x * x), -1.25e-7), (x * x), -6.25e-11), (x * x), -3.9062500000000004e-14) / pow(x, 7.0); }
function code(x) return Float64(fma(fma(fma(-0.0005, Float64(x * x), -1.25e-7), Float64(x * x), -6.25e-11), Float64(x * x), -3.9062500000000004e-14) / (x ^ 7.0)) end
code[x_] := N[(N[(N[(N[(-0.0005 * N[(x * x), $MachinePrecision] + -1.25e-7), $MachinePrecision] * N[(x * x), $MachinePrecision] + -6.25e-11), $MachinePrecision] * N[(x * x), $MachinePrecision] + -3.9062500000000004e-14), $MachinePrecision] / N[Power[x, 7.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l} \\ \frac{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.0005, x \cdot x, -1.25 \cdot 10^{-7}\right), x \cdot x, -6.25 \cdot 10^{-11}\right), x \cdot x, -3.9062500000000004 \cdot 10^{-14}\right)}{{x}^{7}} \end{array}
Initial program 84.6%
Taylor expanded in x around inf
lower-/.f64
N/A
Applied rewrites 93.7%
Taylor expanded in x around 0
Applied rewrites 93.7%
(FPCore (x) :precision binary64 (pow (/ (+ (sqrt (fma x x -0.001)) x) (- (fma x x -0.001) (* x x))) -1.0))
/* Herbie alternative: reciprocal (pow ..., -1.0) of the conjugate quotient;
   rationalized form of -x + sqrt(x*x - 0.001) with fma computing the
   radicand x*x - 0.001. */
double code(double x) { return pow(((sqrt(fma(x, x, -0.001)) + x) / (fma(x, x, -0.001) - (x * x))), -1.0); }
function code(x) return Float64(Float64(sqrt(fma(x, x, -0.001)) + x) / Float64(fma(x, x, -0.001) - Float64(x * x))) ^ -1.0 end
code[x_] := N[Power[N[(N[(N[Sqrt[N[(x * x + -0.001), $MachinePrecision]], $MachinePrecision] + x), $MachinePrecision] / N[(N[(x * x + -0.001), $MachinePrecision] - N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], -1.0], $MachinePrecision]
\begin{array}{l} \\ {\left(\frac{\sqrt{\mathsf{fma}\left(x, x, -0.001\right)} + x}{\mathsf{fma}\left(x, x, -0.001\right) - x \cdot x}\right)}^{-1} \end{array}
Initial program 84.6%
lift-+.f64
N/A
+-commutative
N/A
lift-neg.f64
N/A
unsub-neg
N/A
lower--.f64
84.6
lift--.f64
N/A
sub-neg
N/A
lift-*.f64
N/A
lower-fma.f64
N/A
metadata-eval
84.7
Applied rewrites 84.7%
lift--.f64
N/A
flip--
N/A
clear-num
N/A
lower-/.f64
N/A
lower-/.f64
N/A
lower-+.f64
N/A
lift-sqrt.f64
N/A
lift-sqrt.f64
N/A
rem-square-sqrt
N/A
lift-*.f64
N/A
lower--.f64
84.9
Applied rewrites 84.9%
Final simplification 84.9%
(FPCore (x) :precision binary64 (let* ((t_0 (+ (sqrt (fma x x -0.001)) x))) (- (/ (fma x x -0.001) t_0) (/ (* x x) t_0))))
/* Herbie alternative: rationalized form of -x + sqrt(x*x - 0.001),
   with the division distributed over the two numerator terms; t_0 is the
   shared conjugate denominator sqrt(x*x - 0.001) + x (radicand via fma). */
double code(double x) { double t_0 = sqrt(fma(x, x, -0.001)) + x; return (fma(x, x, -0.001) / t_0) - ((x * x) / t_0); }
function code(x) t_0 = Float64(sqrt(fma(x, x, -0.001)) + x) return Float64(Float64(fma(x, x, -0.001) / t_0) - Float64(Float64(x * x) / t_0)) end
code[x_] := Block[{t$95$0 = N[(N[Sqrt[N[(x * x + -0.001), $MachinePrecision]], $MachinePrecision] + x), $MachinePrecision]}, N[(N[(N[(x * x + -0.001), $MachinePrecision] / t$95$0), $MachinePrecision] - N[(N[(x * x), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l} \\ \begin{array}{l} t_0 := \sqrt{\mathsf{fma}\left(x, x, -0.001\right)} + x\\ \frac{\mathsf{fma}\left(x, x, -0.001\right)}{t\_0} - \frac{x \cdot x}{t\_0} \end{array} \end{array}
Initial program 84.6%
lift-+.f64
N/A
+-commutative
N/A
lift-neg.f64
N/A
unsub-neg
N/A
lower--.f64
84.6
lift--.f64
N/A
sub-neg
N/A
lift-*.f64
N/A
lower-fma.f64
N/A
metadata-eval
84.7
Applied rewrites 84.7%
lift--.f64
N/A
flip--
N/A
lift-sqrt.f64
N/A
lift-sqrt.f64
N/A
rem-square-sqrt
N/A
lift-*.f64
N/A
div-sub
N/A
lower--.f64
N/A
lower-/.f64
N/A
lower-+.f64
N/A
lower-/.f64
N/A
lower-+.f64
85.0
Applied rewrites 85.0%
(FPCore (x) :precision binary64 (/ (- (fma x x -0.001) (* x x)) (+ (sqrt (fma x x -0.001)) x)))
/* Herbie alternative: single-quotient rationalized form of
   -x + sqrt(x*x - 0.001); numerator fma(x,x,-0.001) - x*x captures the
   rounding of the radicand, denominator is the conjugate sqrt(...) + x. */
double code(double x) { return (fma(x, x, -0.001) - (x * x)) / (sqrt(fma(x, x, -0.001)) + x); }
function code(x) return Float64(Float64(fma(x, x, -0.001) - Float64(x * x)) / Float64(sqrt(fma(x, x, -0.001)) + x)) end
code[x_] := N[(N[(N[(x * x + -0.001), $MachinePrecision] - N[(x * x), $MachinePrecision]), $MachinePrecision] / N[(N[Sqrt[N[(x * x + -0.001), $MachinePrecision]], $MachinePrecision] + x), $MachinePrecision]), $MachinePrecision]
\begin{array}{l} \\ \frac{\mathsf{fma}\left(x, x, -0.001\right) - x \cdot x}{\sqrt{\mathsf{fma}\left(x, x, -0.001\right)} + x} \end{array}
Initial program 84.6%
lift-+.f64
N/A
+-commutative
N/A
lift-neg.f64
N/A
unsub-neg
N/A
lower--.f64
84.6
lift--.f64
N/A
sub-neg
N/A
lift-*.f64
N/A
lower-fma.f64
N/A
metadata-eval
84.7
Applied rewrites 84.7%
lift--.f64
N/A
flip--
N/A
lower-/.f64
N/A
lift-sqrt.f64
N/A
lift-sqrt.f64
N/A
rem-square-sqrt
N/A
lift-*.f64
N/A
lower--.f64
N/A
lower-+.f64
84.9
Applied rewrites 84.9%
(FPCore (x) :precision binary64 (- (sqrt (fma x x -0.001)) x))
double code(double x) { return sqrt(fma(x, x, -0.001)) - x; }
function code(x) return Float64(sqrt(fma(x, x, -0.001)) - x) end
code[x_] := N[(N[Sqrt[N[(x * x + -0.001), $MachinePrecision]], $MachinePrecision] - x), $MachinePrecision]
\begin{array}{l} \\ \sqrt{\mathsf{fma}\left(x, x, -0.001\right)} - x \end{array}
Initial program 84.6%
lift-+.f64
N/A
+-commutative
N/A
lift-neg.f64
N/A
unsub-neg
N/A
lower--.f64
84.6
lift--.f64
N/A
sub-neg
N/A
lift-*.f64
N/A
lower-fma.f64
N/A
metadata-eval
84.7
Applied rewrites 84.7%
(FPCore (x) :precision binary64 (/ -0.0005 x))
/* Herbie alternative: leading-order term of the expansion around inf. */
double code(double x) {
    const double numerator = -0.0005;
    return numerator / x;
}
! Herbie alternative: leading-order asymptotic term -0.0005 / x.
real(8) function code(x) real(8), intent (in) :: x code = (-0.0005d0) / x end function
// Herbie alternative: leading-order asymptotic term -0.0005 / x.
public static double code(double x) { return -0.0005 / x; }
def code(x):
    """Herbie alternative: leading-order asymptotic term -0.0005 / x."""
    numerator = -0.0005
    return numerator / x
function code(x) return Float64(-0.0005 / x) end
function tmp = code(x) tmp = -0.0005 / x; end
code[x_] := N[(-0.0005 / x), $MachinePrecision]
\begin{array}{l} \\ \frac{-0.0005}{x} \end{array}
Initial program 84.6%
Taylor expanded in x around inf
lower-/.f64
38.5
Applied rewrites 38.5%
(FPCore (x) :precision binary64 (* -2.0 x))
/* Herbie alternative: constant scaling by -2 (expansion around -inf). */
double code(double x) {
    const double scale = -2.0;
    return scale * x;
}
! Herbie alternative: constant scaling by -2.
real(8) function code(x) real(8), intent (in) :: x code = (-2.0d0) * x end function
// Herbie alternative: constant scaling by -2.
public static double code(double x) { return -2.0 * x; }
def code(x):
    """Herbie alternative: constant scaling by -2."""
    scale = -2.0
    return scale * x
function code(x) return Float64(-2.0 * x) end
function tmp = code(x) tmp = -2.0 * x; end
code[x_] := N[(-2.0 * x), $MachinePrecision]
\begin{array}{l} \\ -2 \cdot x \end{array}
Initial program 84.6%
Taylor expanded in x around -inf
lower-*.f64
12.9
Applied rewrites 12.9%
herbie shell --seed 1
(FPCore (x)
:name "-x+sqrt(x*x-0.001)"
:precision binary64
:pre (and (<= 1.0 x) (<= x 2.0))
(+ (- x) (sqrt (- (* x x) 0.001))))