-x+sqrt(x*x-0.001)

Percentage Accurate: 84.8% → 95.1%
Time: 4.2s
Alternatives: 8
Speedup: 1.2×

Specification

\[1 \leq x \land x \leq 2\]
\[\begin{array}{l} \\ \left(-x\right) + \sqrt{x \cdot x - 0.001} \end{array} \]
(FPCore (x) :precision binary64 (+ (- x) (sqrt (- (* x x) 0.001))))
double code(double x) {
	return -x + sqrt(((x * x) - 0.001));
}
real(8) function code(x)
    real(8), intent (in) :: x
    code = -x + sqrt(((x * x) - 0.001d0))
end function
public static double code(double x) {
	return -x + Math.sqrt(((x * x) - 0.001));
}
def code(x):
	return -x + math.sqrt(((x * x) - 0.001))
function code(x)
	return Float64(Float64(-x) + sqrt(Float64(Float64(x * x) - 0.001)))
end
function tmp = code(x)
	tmp = -x + sqrt(((x * x) - 0.001));
end
code[x_] := N[((-x) + N[Sqrt[N[(N[(x * x), $MachinePrecision] - 0.001), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(-x\right) + \sqrt{x \cdot x - 0.001}
\end{array}
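
For 1 ≤ x ≤ 2, sqrt(x·x − 0.001) differs from x by only about 0.0005/x, so the final addition cancels roughly ten significant bits; this cancellation is why the initial program is only 84.8% accurate. The following sketch is not part of the Herbie report: it compares the naive form with the standard conjugate rewrite −0.001/(√(x·x − 0.001) + x), the same idea behind Alternatives 3–5 below, against a 50-digit decimal reference.

# A minimal sketch (not Herbie output): naive vs. conjugate evaluation,
# checked against a 50-digit decimal reference.
from decimal import Decimal, getcontext
import math

getcontext().prec = 50

def naive(x):
    return -x + math.sqrt(x * x - 0.001)

def conjugate(x):
    # -x + sqrt(x*x - 0.001) == -0.001 / (sqrt(x*x - 0.001) + x) in real arithmetic
    return -0.001 / (math.sqrt(x * x - 0.001) + x)

def reference(x):
    d = Decimal(x)  # binary64 -> Decimal conversion is exact
    return float(-d + (d * d - Decimal("0.001")).sqrt())

for x in (1.0, 1.5, 2.0):
    want = reference(x)
    for name, f in (("naive", naive), ("conjugate", conjugate)):
        print(f"x={x}: {name:9s} rel. error = {abs(f(x) - want) / abs(want):.2e}")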

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs x

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed

Herbie found 8 alternatives:

Alternative  Accuracy  Speedup
1            95.1%     0.1×
2            93.6%     0.2×
3            84.9%     0.2×
4            85.2%     0.3×
5            84.9%     0.5×
6            84.9%     1.2×
7            38.5%     2.0×
8            13.0%     4.0×
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 84.8% accurate, 1.0× speedup

\[\begin{array}{l} \\ \left(-x\right) + \sqrt{x \cdot x - 0.001} \end{array} \]
(FPCore (x) :precision binary64 (+ (- x) (sqrt (- (* x x) 0.001))))
double code(double x) {
	return -x + sqrt(((x * x) - 0.001));
}
real(8) function code(x)
    real(8), intent (in) :: x
    code = -x + sqrt(((x * x) - 0.001d0))
end function
public static double code(double x) {
	return -x + Math.sqrt(((x * x) - 0.001));
}
def code(x):
	return -x + math.sqrt(((x * x) - 0.001))
function code(x)
	return Float64(Float64(-x) + sqrt(Float64(Float64(x * x) - 0.001)))
end
function tmp = code(x)
	tmp = -x + sqrt(((x * x) - 0.001));
end
code[x_] := N[((-x) + N[Sqrt[N[(N[(x * x), $MachinePrecision] - 0.001), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\left(-x\right) + \sqrt{x \cdot x - 0.001}
\end{array}

Alternative 1: 95.1% accurate, 0.1× speedup

\[\begin{array}{l} \\ \mathsf{fma}\left(x, x - \sqrt{\mathsf{fma}\left(x, x, -0.001\right)}, \mathsf{fma}\left(x, x, -0.001\right)\right) \cdot \frac{\left(-0.0005 - \frac{3.75 \cdot 10^{-7}}{x \cdot x}\right) - \frac{\frac{5.4687500000000007 \cdot 10^{-14}}{x \cdot x} + 1.8750000000000002 \cdot 10^{-10}}{{x}^{4}}}{{x}^{3}} \end{array} \]
(FPCore (x)
 :precision binary64
 (*
  (fma x (- x (sqrt (fma x x -0.001))) (fma x x -0.001))
  (/
   (-
    (- -0.0005 (/ 3.75e-7 (* x x)))
    (/
     (+ (/ 5.4687500000000007e-14 (* x x)) 1.8750000000000002e-10)
     (pow x 4.0)))
   (pow x 3.0))))
double code(double x) {
	return fma(x, (x - sqrt(fma(x, x, -0.001))), fma(x, x, -0.001)) * (((-0.0005 - (3.75e-7 / (x * x))) - (((5.4687500000000007e-14 / (x * x)) + 1.8750000000000002e-10) / pow(x, 4.0))) / pow(x, 3.0));
}
function code(x)
	return Float64(fma(x, Float64(x - sqrt(fma(x, x, -0.001))), fma(x, x, -0.001)) * Float64(Float64(Float64(-0.0005 - Float64(3.75e-7 / Float64(x * x))) - Float64(Float64(Float64(5.4687500000000007e-14 / Float64(x * x)) + 1.8750000000000002e-10) / (x ^ 4.0))) / (x ^ 3.0)))
end
code[x_] := N[(N[(x * N[(x - N[Sqrt[N[(x * x + -0.001), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] + N[(x * x + -0.001), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(-0.0005 - N[(3.75e-7 / N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - N[(N[(N[(5.4687500000000007e-14 / N[(x * x), $MachinePrecision]), $MachinePrecision] + 1.8750000000000002e-10), $MachinePrecision] / N[Power[x, 4.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] / N[Power[x, 3.0], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{fma}\left(x, x - \sqrt{\mathsf{fma}\left(x, x, -0.001\right)}, \mathsf{fma}\left(x, x, -0.001\right)\right) \cdot \frac{\left(-0.0005 - \frac{3.75 \cdot 10^{-7}}{x \cdot x}\right) - \frac{\frac{5.4687500000000007 \cdot 10^{-14}}{x \cdot x} + 1.8750000000000002 \cdot 10^{-10}}{{x}^{4}}}{{x}^{3}}
\end{array}
Derivation
  1. Initial program 84.6%

    \[\left(-x\right) + \sqrt{x \cdot x - 0.001} \]
  2. Add Preprocessing
  3. Applied rewrites 84.7%

    \[\leadsto \color{blue}{\mathsf{fma}\left(x, x - \sqrt{\mathsf{fma}\left(x, x, -0.001\right)}, \mathsf{fma}\left(x, x, -0.001\right)\right) \cdot \frac{\sqrt{\mathsf{fma}\left(x, x, -0.001\right)} - x}{\mathsf{fma}\left(x, x - \sqrt{\mathsf{fma}\left(x, x, -0.001\right)}, \mathsf{fma}\left(x, x, -0.001\right)\right)}} \]
  4. Taylor expanded in x around inf

    \[\leadsto \mathsf{fma}\left(x, x - \sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)}, \mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)\right) \cdot \color{blue}{\frac{-1 \cdot \frac{\frac{4597486622597666862188288170992762074823622269}{24519928653854221733733552434404946937899825954937634816} + \frac{12367929453448691336920085967077774584285263551822347651176967}{226156424291633194186662080095093570025917938800079226639565593765455331328} \cdot \frac{1}{{x}^{2}}}{{x}^{4}} - \left(\frac{1152921504606847}{2305843009213693952} + \frac{3987683987354747784732117844227}{10633823966279326983230456482242756608} \cdot \frac{1}{{x}^{2}}\right)}{{x}^{3}}} \]
  5. Step-by-step derivation
    1. lower-/.f64 N/A

      \[\leadsto \mathsf{fma}\left(x, x - \sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)}, \mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)\right) \cdot \color{blue}{\frac{-1 \cdot \frac{\frac{4597486622597666862188288170992762074823622269}{24519928653854221733733552434404946937899825954937634816} + \frac{12367929453448691336920085967077774584285263551822347651176967}{226156424291633194186662080095093570025917938800079226639565593765455331328} \cdot \frac{1}{{x}^{2}}}{{x}^{4}} - \left(\frac{1152921504606847}{2305843009213693952} + \frac{3987683987354747784732117844227}{10633823966279326983230456482242756608} \cdot \frac{1}{{x}^{2}}\right)}{{x}^{3}}} \]
  6. Applied rewrites 95.3%

    \[\leadsto \mathsf{fma}\left(x, x - \sqrt{\mathsf{fma}\left(x, x, -0.001\right)}, \mathsf{fma}\left(x, x, -0.001\right)\right) \cdot \color{blue}{\frac{\left(-0.0005 - \frac{3.75 \cdot 10^{-7}}{x \cdot x}\right) - \frac{\frac{5.4687500000000007 \cdot 10^{-14}}{x \cdot x} + 1.8750000000000002 \cdot 10^{-10}}{{x}^{4}}}{{x}^{3}}} \]
  7. Add Preprocessing

Alternative 2: 93.6% accurate, 0.2× speedup

\[\begin{array}{l} \\ \frac{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.0005, x \cdot x, -1.25 \cdot 10^{-7}\right), x \cdot x, -6.25 \cdot 10^{-11}\right), x \cdot x, -3.9062500000000004 \cdot 10^{-14}\right)}{{x}^{7}} \end{array} \]
(FPCore (x)
 :precision binary64
 (/
  (fma
   (fma (fma -0.0005 (* x x) -1.25e-7) (* x x) -6.25e-11)
   (* x x)
   -3.9062500000000004e-14)
  (pow x 7.0)))
double code(double x) {
	return fma(fma(fma(-0.0005, (x * x), -1.25e-7), (x * x), -6.25e-11), (x * x), -3.9062500000000004e-14) / pow(x, 7.0);
}
function code(x)
	return Float64(fma(fma(fma(-0.0005, Float64(x * x), -1.25e-7), Float64(x * x), -6.25e-11), Float64(x * x), -3.9062500000000004e-14) / (x ^ 7.0))
end
code[x_] := N[(N[(N[(N[(-0.0005 * N[(x * x), $MachinePrecision] + -1.25e-7), $MachinePrecision] * N[(x * x), $MachinePrecision] + -6.25e-11), $MachinePrecision] * N[(x * x), $MachinePrecision] + -3.9062500000000004e-14), $MachinePrecision] / N[Power[x, 7.0], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
\frac{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.0005, x \cdot x, -1.25 \cdot 10^{-7}\right), x \cdot x, -6.25 \cdot 10^{-11}\right), x \cdot x, -3.9062500000000004 \cdot 10^{-14}\right)}{{x}^{7}}
\end{array}
Derivation
  1. Initial program 84.6%

    \[\left(-x\right) + \sqrt{x \cdot x - 0.001} \]
  2. Add Preprocessing
  3. Taylor expanded in x around inf

    \[\leadsto \color{blue}{\frac{-1 \cdot \frac{\frac{1532495540865888954062762723664254024941207423}{24519928653854221733733552434404946937899825954937634816} + \frac{8834235323891922383514347119341267560203759679873105465126405}{226156424291633194186662080095093570025917938800079226639565593765455331328} \cdot \frac{1}{{x}^{2}}}{{x}^{4}} - \left(\frac{1152921504606847}{2305843009213693952} + \frac{1329227995784915928244039281409}{10633823966279326983230456482242756608} \cdot \frac{1}{{x}^{2}}\right)}{x}} \]
  4. Step-by-step derivation
    1. lower-/.f64 N/A

      \[\leadsto \color{blue}{\frac{-1 \cdot \frac{\frac{1532495540865888954062762723664254024941207423}{24519928653854221733733552434404946937899825954937634816} + \frac{8834235323891922383514347119341267560203759679873105465126405}{226156424291633194186662080095093570025917938800079226639565593765455331328} \cdot \frac{1}{{x}^{2}}}{{x}^{4}} - \left(\frac{1152921504606847}{2305843009213693952} + \frac{1329227995784915928244039281409}{10633823966279326983230456482242756608} \cdot \frac{1}{{x}^{2}}\right)}{x}} \]
  5. Applied rewrites 93.7%

    \[\leadsto \color{blue}{\frac{\left(-0.0005 - \frac{1.25 \cdot 10^{-7}}{x \cdot x}\right) - \frac{\frac{3.9062500000000004 \cdot 10^{-14}}{x \cdot x} + 6.25 \cdot 10^{-11}}{{x}^{4}}}{x}} \]
  6. Taylor expanded in x around 0

    \[\leadsto \frac{{x}^{2} \cdot \left({x}^{2} \cdot \left(\frac{-1152921504606847}{2305843009213693952} \cdot {x}^{2} - \frac{1329227995784915928244039281409}{10633823966279326983230456482242756608}\right) - \frac{1532495540865888954062762723664254024941207423}{24519928653854221733733552434404946937899825954937634816}\right) - \frac{8834235323891922383514347119341267560203759679873105465126405}{226156424291633194186662080095093570025917938800079226639565593765455331328}}{\color{blue}{{x}^{7}}} \]
  7. Step-by-step derivation
    1. Applied rewrites 93.7%

      \[\leadsto \frac{\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.0005, x \cdot x, -1.25 \cdot 10^{-7}\right), x \cdot x, -6.25 \cdot 10^{-11}\right), x \cdot x, -3.9062500000000004 \cdot 10^{-14}\right)}{\color{blue}{{x}^{7}}} \]
  8. Add Preprocessing
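
The constants in this alternative are not arbitrary: they are the binomial-series coefficients of the square root. As a check (this derivation is not part of the report), write ε = 0.001 and expand for large x:

\[-x + \sqrt{x^2 - \varepsilon} = -\frac{\varepsilon}{2 x} - \frac{\varepsilon^2}{8 {x}^{3}} - \frac{\varepsilon^3}{16 {x}^{5}} - \frac{5 \varepsilon^4}{128 {x}^{7}} - \cdots \]

With ε = 0.001 the coefficients are −5e-4, −1.25e-7, −6.25e-11, and −3.90625e-14; multiplying through by x⁷ and evaluating the cubic in x² by Horner's rule gives exactly the nested fma form above (the last constant differs in its final digits because 0.001 is first rounded to binary64). Alternative 7 below keeps only the first term.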

Alternative 3: 84.9% accurate, 0.2× speedup

    \[\begin{array}{l} \\ {\left(\frac{\sqrt{\mathsf{fma}\left(x, x, -0.001\right)} + x}{\mathsf{fma}\left(x, x, -0.001\right) - x \cdot x}\right)}^{-1} \end{array} \]
    (FPCore (x)
     :precision binary64
     (pow (/ (+ (sqrt (fma x x -0.001)) x) (- (fma x x -0.001) (* x x))) -1.0))
    double code(double x) {
    	return pow(((sqrt(fma(x, x, -0.001)) + x) / (fma(x, x, -0.001) - (x * x))), -1.0);
    }
    
    function code(x)
    	return Float64(Float64(sqrt(fma(x, x, -0.001)) + x) / Float64(fma(x, x, -0.001) - Float64(x * x))) ^ -1.0
    end
    
    code[x_] := N[Power[N[(N[(N[Sqrt[N[(x * x + -0.001), $MachinePrecision]], $MachinePrecision] + x), $MachinePrecision] / N[(N[(x * x + -0.001), $MachinePrecision] - N[(x * x), $MachinePrecision]), $MachinePrecision]), $MachinePrecision], -1.0], $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    {\left(\frac{\sqrt{\mathsf{fma}\left(x, x, -0.001\right)} + x}{\mathsf{fma}\left(x, x, -0.001\right) - x \cdot x}\right)}^{-1}
    \end{array}
    
Derivation
    1. Initial program 84.6%

      \[\left(-x\right) + \sqrt{x \cdot x - 0.001} \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. lift-+.f64 N/A

        \[\leadsto \color{blue}{\left(-x\right) + \sqrt{x \cdot x - \frac{1152921504606847}{1152921504606846976}}} \]
      2. +-commutative N/A

        \[\leadsto \color{blue}{\sqrt{x \cdot x - \frac{1152921504606847}{1152921504606846976}} + \left(-x\right)} \]
      3. lift-neg.f64 N/A

        \[\leadsto \sqrt{x \cdot x - \frac{1152921504606847}{1152921504606846976}} + \color{blue}{\left(\mathsf{neg}\left(x\right)\right)} \]
      4. unsub-neg N/A

        \[\leadsto \color{blue}{\sqrt{x \cdot x - \frac{1152921504606847}{1152921504606846976}} - x} \]
      5. lower--.f64 84.6

        \[\leadsto \color{blue}{\sqrt{x \cdot x - 0.001} - x} \]
      6. lift--.f64 N/A

        \[\leadsto \sqrt{\color{blue}{x \cdot x - \frac{1152921504606847}{1152921504606846976}}} - x \]
      7. sub-neg N/A

        \[\leadsto \sqrt{\color{blue}{x \cdot x + \left(\mathsf{neg}\left(\frac{1152921504606847}{1152921504606846976}\right)\right)}} - x \]
      8. lift-*.f64 N/A

        \[\leadsto \sqrt{\color{blue}{x \cdot x} + \left(\mathsf{neg}\left(\frac{1152921504606847}{1152921504606846976}\right)\right)} - x \]
      9. lower-fma.f64 N/A

        \[\leadsto \sqrt{\color{blue}{\mathsf{fma}\left(x, x, \mathsf{neg}\left(\frac{1152921504606847}{1152921504606846976}\right)\right)}} - x \]
      10. metadata-eval 84.7

        \[\leadsto \sqrt{\mathsf{fma}\left(x, x, \color{blue}{-0.001}\right)} - x \]
    4. Applied rewrites 84.7%

      \[\leadsto \color{blue}{\sqrt{\mathsf{fma}\left(x, x, -0.001\right)} - x} \]
    5. Step-by-step derivation
      1. lift--.f64 N/A

        \[\leadsto \color{blue}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} - x} \]
      2. flip-- N/A

        \[\leadsto \color{blue}{\frac{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} \cdot \sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} - x \cdot x}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} + x}} \]
      3. clear-num N/A

        \[\leadsto \color{blue}{\frac{1}{\frac{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} + x}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} \cdot \sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} - x \cdot x}}} \]
      4. lower-/.f64 N/A

        \[\leadsto \color{blue}{\frac{1}{\frac{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} + x}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} \cdot \sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} - x \cdot x}}} \]
      5. lower-/.f64 N/A

        \[\leadsto \frac{1}{\color{blue}{\frac{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} + x}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} \cdot \sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} - x \cdot x}}} \]
      6. lower-+.f64 N/A

        \[\leadsto \frac{1}{\frac{\color{blue}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} + x}}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} \cdot \sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} - x \cdot x}} \]
      7. lift-sqrt.f64 N/A

        \[\leadsto \frac{1}{\frac{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} + x}{\color{blue}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)}} \cdot \sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} - x \cdot x}} \]
      8. lift-sqrt.f64 N/A

        \[\leadsto \frac{1}{\frac{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} + x}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} \cdot \color{blue}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)}} - x \cdot x}} \]
      9. rem-square-sqrt N/A

        \[\leadsto \frac{1}{\frac{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} + x}{\color{blue}{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} - x \cdot x}} \]
      10. lift-*.f64 N/A

        \[\leadsto \frac{1}{\frac{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} + x}{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right) - \color{blue}{x \cdot x}}} \]
      11. lower--.f64 84.9

        \[\leadsto \frac{1}{\frac{\sqrt{\mathsf{fma}\left(x, x, -0.001\right)} + x}{\color{blue}{\mathsf{fma}\left(x, x, -0.001\right) - x \cdot x}}} \]
    6. Applied rewrites 84.9%

      \[\leadsto \color{blue}{\frac{1}{\frac{\sqrt{\mathsf{fma}\left(x, x, -0.001\right)} + x}{\mathsf{fma}\left(x, x, -0.001\right) - x \cdot x}}} \]
    7. Final simplification 84.9%

      \[\leadsto {\left(\frac{\sqrt{\mathsf{fma}\left(x, x, -0.001\right)} + x}{\mathsf{fma}\left(x, x, -0.001\right) - x \cdot x}\right)}^{-1} \]
    8. Add Preprocessing

Alternative 4: 85.2% accurate, 0.3× speedup

    \[\begin{array}{l} \\ \begin{array}{l} t_0 := \sqrt{\mathsf{fma}\left(x, x, -0.001\right)} + x\\ \frac{\mathsf{fma}\left(x, x, -0.001\right)}{t\_0} - \frac{x \cdot x}{t\_0} \end{array} \end{array} \]
    (FPCore (x)
     :precision binary64
     (let* ((t_0 (+ (sqrt (fma x x -0.001)) x)))
       (- (/ (fma x x -0.001) t_0) (/ (* x x) t_0))))
    double code(double x) {
    	double t_0 = sqrt(fma(x, x, -0.001)) + x;
    	return (fma(x, x, -0.001) / t_0) - ((x * x) / t_0);
    }
    
    function code(x)
    	t_0 = Float64(sqrt(fma(x, x, -0.001)) + x)
    	return Float64(Float64(fma(x, x, -0.001) / t_0) - Float64(Float64(x * x) / t_0))
    end
    
    code[x_] := Block[{t$95$0 = N[(N[Sqrt[N[(x * x + -0.001), $MachinePrecision]], $MachinePrecision] + x), $MachinePrecision]}, N[(N[(N[(x * x + -0.001), $MachinePrecision] / t$95$0), $MachinePrecision] - N[(N[(x * x), $MachinePrecision] / t$95$0), $MachinePrecision]), $MachinePrecision]]
    
    \begin{array}{l}
    
    \\
    \begin{array}{l}
    t_0 := \sqrt{\mathsf{fma}\left(x, x, -0.001\right)} + x\\
    \frac{\mathsf{fma}\left(x, x, -0.001\right)}{t\_0} - \frac{x \cdot x}{t\_0}
    \end{array}
    \end{array}
    
Derivation
    1. Initial program 84.6%

      \[\left(-x\right) + \sqrt{x \cdot x - 0.001} \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. lift-+.f64 N/A

        \[\leadsto \color{blue}{\left(-x\right) + \sqrt{x \cdot x - \frac{1152921504606847}{1152921504606846976}}} \]
      2. +-commutative N/A

        \[\leadsto \color{blue}{\sqrt{x \cdot x - \frac{1152921504606847}{1152921504606846976}} + \left(-x\right)} \]
      3. lift-neg.f64 N/A

        \[\leadsto \sqrt{x \cdot x - \frac{1152921504606847}{1152921504606846976}} + \color{blue}{\left(\mathsf{neg}\left(x\right)\right)} \]
      4. unsub-neg N/A

        \[\leadsto \color{blue}{\sqrt{x \cdot x - \frac{1152921504606847}{1152921504606846976}} - x} \]
      5. lower--.f64 84.6

        \[\leadsto \color{blue}{\sqrt{x \cdot x - 0.001} - x} \]
      6. lift--.f64 N/A

        \[\leadsto \sqrt{\color{blue}{x \cdot x - \frac{1152921504606847}{1152921504606846976}}} - x \]
      7. sub-neg N/A

        \[\leadsto \sqrt{\color{blue}{x \cdot x + \left(\mathsf{neg}\left(\frac{1152921504606847}{1152921504606846976}\right)\right)}} - x \]
      8. lift-*.f64 N/A

        \[\leadsto \sqrt{\color{blue}{x \cdot x} + \left(\mathsf{neg}\left(\frac{1152921504606847}{1152921504606846976}\right)\right)} - x \]
      9. lower-fma.f64 N/A

        \[\leadsto \sqrt{\color{blue}{\mathsf{fma}\left(x, x, \mathsf{neg}\left(\frac{1152921504606847}{1152921504606846976}\right)\right)}} - x \]
      10. metadata-eval 84.7

        \[\leadsto \sqrt{\mathsf{fma}\left(x, x, \color{blue}{-0.001}\right)} - x \]
    4. Applied rewrites 84.7%

      \[\leadsto \color{blue}{\sqrt{\mathsf{fma}\left(x, x, -0.001\right)} - x} \]
    5. Step-by-step derivation
      1. lift--.f64 N/A

        \[\leadsto \color{blue}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} - x} \]
      2. flip-- N/A

        \[\leadsto \color{blue}{\frac{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} \cdot \sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} - x \cdot x}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} + x}} \]
      3. lift-sqrt.f64 N/A

        \[\leadsto \frac{\color{blue}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)}} \cdot \sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} - x \cdot x}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} + x} \]
      4. lift-sqrt.f64 N/A

        \[\leadsto \frac{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} \cdot \color{blue}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)}} - x \cdot x}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} + x} \]
      5. rem-square-sqrt N/A

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} - x \cdot x}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} + x} \]
      6. lift-*.f64 N/A

        \[\leadsto \frac{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right) - \color{blue}{x \cdot x}}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} + x} \]
      7. div-sub N/A

        \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} + x} - \frac{x \cdot x}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} + x}} \]
      8. lower--.f64 N/A

        \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} + x} - \frac{x \cdot x}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} + x}} \]
      9. lower-/.f64 N/A

        \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} + x}} - \frac{x \cdot x}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} + x} \]
      10. lower-+.f64 N/A

        \[\leadsto \frac{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)}{\color{blue}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} + x}} - \frac{x \cdot x}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} + x} \]
      11. lower-/.f64 N/A

        \[\leadsto \frac{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} + x} - \color{blue}{\frac{x \cdot x}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} + x}} \]
      12. lower-+.f64 85.0

        \[\leadsto \frac{\mathsf{fma}\left(x, x, -0.001\right)}{\sqrt{\mathsf{fma}\left(x, x, -0.001\right)} + x} - \frac{x \cdot x}{\color{blue}{\sqrt{\mathsf{fma}\left(x, x, -0.001\right)} + x}} \]
    6. Applied rewrites 85.0%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(x, x, -0.001\right)}{\sqrt{\mathsf{fma}\left(x, x, -0.001\right)} + x} - \frac{x \cdot x}{\sqrt{\mathsf{fma}\left(x, x, -0.001\right)} + x}} \]
    7. Add Preprocessing
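
In exact arithmetic this is Alternative 5's conjugate form with the division distributed over the subtraction; the two differ only in rounding, which accounts for the small accuracy gap (85.2% here vs. 84.9%).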

Alternative 5: 84.9% accurate, 0.5× speedup

    \[\begin{array}{l} \\ \frac{\mathsf{fma}\left(x, x, -0.001\right) - x \cdot x}{\sqrt{\mathsf{fma}\left(x, x, -0.001\right)} + x} \end{array} \]
    (FPCore (x)
     :precision binary64
     (/ (- (fma x x -0.001) (* x x)) (+ (sqrt (fma x x -0.001)) x)))
    double code(double x) {
    	return (fma(x, x, -0.001) - (x * x)) / (sqrt(fma(x, x, -0.001)) + x);
    }
    
    function code(x)
    	return Float64(Float64(fma(x, x, -0.001) - Float64(x * x)) / Float64(sqrt(fma(x, x, -0.001)) + x))
    end
    
    code[x_] := N[(N[(N[(x * x + -0.001), $MachinePrecision] - N[(x * x), $MachinePrecision]), $MachinePrecision] / N[(N[Sqrt[N[(x * x + -0.001), $MachinePrecision]], $MachinePrecision] + x), $MachinePrecision]), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    \frac{\mathsf{fma}\left(x, x, -0.001\right) - x \cdot x}{\sqrt{\mathsf{fma}\left(x, x, -0.001\right)} + x}
    \end{array}
    
Derivation
    1. Initial program 84.6%

      \[\left(-x\right) + \sqrt{x \cdot x - 0.001} \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. lift-+.f64 N/A

        \[\leadsto \color{blue}{\left(-x\right) + \sqrt{x \cdot x - \frac{1152921504606847}{1152921504606846976}}} \]
      2. +-commutative N/A

        \[\leadsto \color{blue}{\sqrt{x \cdot x - \frac{1152921504606847}{1152921504606846976}} + \left(-x\right)} \]
      3. lift-neg.f64 N/A

        \[\leadsto \sqrt{x \cdot x - \frac{1152921504606847}{1152921504606846976}} + \color{blue}{\left(\mathsf{neg}\left(x\right)\right)} \]
      4. unsub-neg N/A

        \[\leadsto \color{blue}{\sqrt{x \cdot x - \frac{1152921504606847}{1152921504606846976}} - x} \]
      5. lower--.f64 84.6

        \[\leadsto \color{blue}{\sqrt{x \cdot x - 0.001} - x} \]
      6. lift--.f64 N/A

        \[\leadsto \sqrt{\color{blue}{x \cdot x - \frac{1152921504606847}{1152921504606846976}}} - x \]
      7. sub-neg N/A

        \[\leadsto \sqrt{\color{blue}{x \cdot x + \left(\mathsf{neg}\left(\frac{1152921504606847}{1152921504606846976}\right)\right)}} - x \]
      8. lift-*.f64 N/A

        \[\leadsto \sqrt{\color{blue}{x \cdot x} + \left(\mathsf{neg}\left(\frac{1152921504606847}{1152921504606846976}\right)\right)} - x \]
      9. lower-fma.f64 N/A

        \[\leadsto \sqrt{\color{blue}{\mathsf{fma}\left(x, x, \mathsf{neg}\left(\frac{1152921504606847}{1152921504606846976}\right)\right)}} - x \]
      10. metadata-eval 84.7

        \[\leadsto \sqrt{\mathsf{fma}\left(x, x, \color{blue}{-0.001}\right)} - x \]
    4. Applied rewrites 84.7%

      \[\leadsto \color{blue}{\sqrt{\mathsf{fma}\left(x, x, -0.001\right)} - x} \]
    5. Step-by-step derivation
      1. lift--.f64 N/A

        \[\leadsto \color{blue}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} - x} \]
      2. flip-- N/A

        \[\leadsto \color{blue}{\frac{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} \cdot \sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} - x \cdot x}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} + x}} \]
      3. lower-/.f64 N/A

        \[\leadsto \color{blue}{\frac{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} \cdot \sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} - x \cdot x}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} + x}} \]
      4. lift-sqrt.f64 N/A

        \[\leadsto \frac{\color{blue}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)}} \cdot \sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} - x \cdot x}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} + x} \]
      5. lift-sqrt.f64 N/A

        \[\leadsto \frac{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} \cdot \color{blue}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)}} - x \cdot x}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} + x} \]
      6. rem-square-sqrt N/A

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} - x \cdot x}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} + x} \]
      7. lift-*.f64 N/A

        \[\leadsto \frac{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right) - \color{blue}{x \cdot x}}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} + x} \]
      8. lower--.f64 N/A

        \[\leadsto \frac{\color{blue}{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right) - x \cdot x}}{\sqrt{\mathsf{fma}\left(x, x, \frac{-1152921504606847}{1152921504606846976}\right)} + x} \]
      9. lower-+.f64 84.9

        \[\leadsto \frac{\mathsf{fma}\left(x, x, -0.001\right) - x \cdot x}{\color{blue}{\sqrt{\mathsf{fma}\left(x, x, -0.001\right)} + x}} \]
    6. Applied rewrites 84.9%

      \[\leadsto \color{blue}{\frac{\mathsf{fma}\left(x, x, -0.001\right) - x \cdot x}{\sqrt{\mathsf{fma}\left(x, x, -0.001\right)} + x}} \]
    7. Add Preprocessing
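
The flip-- step above is the textbook conjugate rewrite. As a check (not part of the report), with a = fma(x, x, −0.001):

\[\sqrt{a} - x = \frac{\left(\sqrt{a} - x\right) \cdot \left(\sqrt{a} + x\right)}{\sqrt{a} + x} = \frac{a - x \cdot x}{\sqrt{a} + x} \]

The subtraction now happens in the numerator between two nearby quantities whose difference (about −0.001) is computed essentially exactly, and the denominator adds two like-signed terms, so the catastrophic cancellation of the original √a − x is gone.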

Alternative 6: 84.9% accurate, 1.2× speedup

    \[\begin{array}{l} \\ \sqrt{\mathsf{fma}\left(x, x, -0.001\right)} - x \end{array} \]
    (FPCore (x) :precision binary64 (- (sqrt (fma x x -0.001)) x))
    double code(double x) {
    	return sqrt(fma(x, x, -0.001)) - x;
    }
    
    function code(x)
    	return Float64(sqrt(fma(x, x, -0.001)) - x)
    end
    
    code[x_] := N[(N[Sqrt[N[(x * x + -0.001), $MachinePrecision]], $MachinePrecision] - x), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    \sqrt{\mathsf{fma}\left(x, x, -0.001\right)} - x
    \end{array}
    
Derivation
    1. Initial program 84.6%

      \[\left(-x\right) + \sqrt{x \cdot x - 0.001} \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. lift-+.f64 N/A

        \[\leadsto \color{blue}{\left(-x\right) + \sqrt{x \cdot x - \frac{1152921504606847}{1152921504606846976}}} \]
      2. +-commutative N/A

        \[\leadsto \color{blue}{\sqrt{x \cdot x - \frac{1152921504606847}{1152921504606846976}} + \left(-x\right)} \]
      3. lift-neg.f64 N/A

        \[\leadsto \sqrt{x \cdot x - \frac{1152921504606847}{1152921504606846976}} + \color{blue}{\left(\mathsf{neg}\left(x\right)\right)} \]
      4. unsub-neg N/A

        \[\leadsto \color{blue}{\sqrt{x \cdot x - \frac{1152921504606847}{1152921504606846976}} - x} \]
      5. lower--.f64 84.6

        \[\leadsto \color{blue}{\sqrt{x \cdot x - 0.001} - x} \]
      6. lift--.f64 N/A

        \[\leadsto \sqrt{\color{blue}{x \cdot x - \frac{1152921504606847}{1152921504606846976}}} - x \]
      7. sub-neg N/A

        \[\leadsto \sqrt{\color{blue}{x \cdot x + \left(\mathsf{neg}\left(\frac{1152921504606847}{1152921504606846976}\right)\right)}} - x \]
      8. lift-*.f64 N/A

        \[\leadsto \sqrt{\color{blue}{x \cdot x} + \left(\mathsf{neg}\left(\frac{1152921504606847}{1152921504606846976}\right)\right)} - x \]
      9. lower-fma.f64 N/A

        \[\leadsto \sqrt{\color{blue}{\mathsf{fma}\left(x, x, \mathsf{neg}\left(\frac{1152921504606847}{1152921504606846976}\right)\right)}} - x \]
      10. metadata-eval 84.7

        \[\leadsto \sqrt{\mathsf{fma}\left(x, x, \color{blue}{-0.001}\right)} - x \]
    4. Applied rewrites 84.7%

      \[\leadsto \color{blue}{\sqrt{\mathsf{fma}\left(x, x, -0.001\right)} - x} \]
    5. Add Preprocessing
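
This alternative only replaces x·x − 0.001 with fma(x, x, −0.001); the subtraction of x that causes the cancellation is untouched, so accuracy barely improves over the initial program, while fusing the multiply and subtract into a single fma likely accounts for the 1.2× speedup.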

Alternative 7: 38.5% accurate, 2.0× speedup

    \[\begin{array}{l} \\ \frac{-0.0005}{x} \end{array} \]
    (FPCore (x) :precision binary64 (/ -0.0005 x))
    double code(double x) {
    	return -0.0005 / x;
    }
    
    real(8) function code(x)
        real(8), intent (in) :: x
        code = (-0.0005d0) / x
    end function
    
    public static double code(double x) {
    	return -0.0005 / x;
    }
    
    def code(x):
    	return -0.0005 / x
    
    function code(x)
    	return Float64(-0.0005 / x)
    end
    
    function tmp = code(x)
    	tmp = -0.0005 / x;
    end
    
    code[x_] := N[(-0.0005 / x), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    \frac{-0.0005}{x}
    \end{array}
    
Derivation
    1. Initial program 84.6%

      \[\left(-x\right) + \sqrt{x \cdot x - 0.001} \]
    2. Add Preprocessing
    3. Taylor expanded in x around inf

      \[\leadsto \color{blue}{\frac{\frac{-1152921504606847}{2305843009213693952}}{x}} \]
    4. Step-by-step derivation
      1. lower-/.f64 38.5

        \[\leadsto \color{blue}{\frac{-0.0005}{x}} \]
    5. Applied rewrites 38.5%

      \[\leadsto \color{blue}{\frac{-0.0005}{x}} \]
    6. Add Preprocessing
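
This keeps only the leading term −0.0005/x of the series noted under Alternative 2, which is why it trades so much accuracy (38.5%) for a 2× speedup.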

Alternative 8: 13.0% accurate, 4.0× speedup

    \[\begin{array}{l} \\ -2 \cdot x \end{array} \]
    (FPCore (x) :precision binary64 (* -2.0 x))
    double code(double x) {
    	return -2.0 * x;
    }
    
    real(8) function code(x)
        real(8), intent (in) :: x
        code = (-2.0d0) * x
    end function
    
    public static double code(double x) {
    	return -2.0 * x;
    }
    
    def code(x):
    	return -2.0 * x
    
    function code(x)
    	return Float64(-2.0 * x)
    end
    
    function tmp = code(x)
    	tmp = -2.0 * x;
    end
    
    code[x_] := N[(-2.0 * x), $MachinePrecision]
    
    \begin{array}{l}
    
    \\
    -2 \cdot x
    \end{array}
    
Derivation
    1. Initial program 84.6%

      \[\left(-x\right) + \sqrt{x \cdot x - 0.001} \]
    2. Add Preprocessing
    3. Taylor expanded in x around -inf

      \[\leadsto \color{blue}{-2 \cdot x} \]
    4. Step-by-step derivation
      1. lower-*.f64 12.9

        \[\leadsto \color{blue}{-2 \cdot x} \]
    5. Applied rewrites 12.9%

      \[\leadsto \color{blue}{-2 \cdot x} \]
    6. Add Preprocessing
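
As x → −∞, √(x·x − 0.001) ≈ −x, so the expression tends to −2·x; since the precondition restricts x to [1, 2], this branch is a poor fit there, hence the 13.0% accuracy.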

Reproduce

    herbie shell --seed 1 
    (FPCore (x)
      :name "-x+sqrt(x*x-0.001)"
      :precision binary64
      :pre (and (<= 1.0 x) (<= x 2.0))
      (+ (- x) (sqrt (- (* x x) 0.001))))
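
Entering the FPCore above at the herbie shell prompt reruns the search with the same seed and should reproduce the alternatives in this report.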