(FPCore (x) :precision binary64 (let* ((t_0 (sqrt (- 1.0 (* x x))))) (/ (- 1.0 t_0) (+ 1.0 t_0))))
double code(double x) { double t_0 = sqrt((1.0 - (x * x))); return (1.0 - t_0) / (1.0 + t_0); }
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: t_0
    t_0 = sqrt((1.0d0 - (x * x)))
    code = (1.0d0 - t_0) / (1.0d0 + t_0)
end function
public static double code(double x) { double t_0 = Math.sqrt((1.0 - (x * x))); return (1.0 - t_0) / (1.0 + t_0); }
def code(x):
    t_0 = math.sqrt((1.0 - (x * x)))
    return (1.0 - t_0) / (1.0 + t_0)
function code(x)
    t_0 = sqrt(Float64(1.0 - Float64(x * x)))
    return Float64(Float64(1.0 - t_0) / Float64(1.0 + t_0))
end
function tmp = code(x)
    t_0 = sqrt((1.0 - (x * x)));
    tmp = (1.0 - t_0) / (1.0 + t_0);
end
code[x_] := Block[{t$95$0 = N[Sqrt[N[(1.0 - N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]}, N[(N[(1.0 - t$95$0), $MachinePrecision] / N[(1.0 + t$95$0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l} t_0 := \sqrt{1 - x \cdot x}\\ \frac{1 - t_0}{1 + t_0} \end{array}
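For x near 0, sqrt(1 - x*x) is very close to 1, so the subtraction 1 - sqrt(1 - x*x) cancels and the quotient loses most of its accuracy (for x*x below roughly 1e-16 the numerator even rounds to exactly 0). The exact alternatives below, and the polynomial ones via a Taylor expansion, all rest on the identity, writing t = sqrt(1 - x*x):

\frac{1 - t}{1 + t} = \frac{(1 - t)(1 + t)}{(1 + t)^2} = \frac{1 - t^2}{(1 + t)^2} = \frac{x^2}{\left(1 + \sqrt{1 - x^2}\right)^2},

which trades the cancelling subtraction for a well-conditioned square.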
Sampling outcomes in binary64 precision:
Herbie found 11 alternatives:
(FPCore (x) :precision binary64 (let* ((t_0 (- 1.0 (* x x))) (t_1 (+ (pow t_0 1.5) 1.0)) (t_2 (sqrt t_0))) (fma (/ (* x x) t_1) (pow (+ t_2 1.0) -1.0) (* (- 1.0 (fma x x t_2)) (/ (- 1.0 t_2) t_1)))))
double code(double x) { double t_0 = 1.0 - (x * x); double t_1 = pow(t_0, 1.5) + 1.0; double t_2 = sqrt(t_0); return fma(((x * x) / t_1), pow((t_2 + 1.0), -1.0), ((1.0 - fma(x, x, t_2)) * ((1.0 - t_2) / t_1))); }
function code(x)
    t_0 = Float64(1.0 - Float64(x * x))
    t_1 = Float64((t_0 ^ 1.5) + 1.0)
    t_2 = sqrt(t_0)
    return fma(Float64(Float64(x * x) / t_1), (Float64(t_2 + 1.0) ^ -1.0), Float64(Float64(1.0 - fma(x, x, t_2)) * Float64(Float64(1.0 - t_2) / t_1)))
end
code[x_] := Block[{t$95$0 = N[(1.0 - N[(x * x), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[(N[Power[t$95$0, 1.5], $MachinePrecision] + 1.0), $MachinePrecision]}, Block[{t$95$2 = N[Sqrt[t$95$0], $MachinePrecision]}, N[(N[(N[(x * x), $MachinePrecision] / t$95$1), $MachinePrecision] * N[Power[N[(t$95$2 + 1.0), $MachinePrecision], -1.0], $MachinePrecision] + N[(N[(1.0 - N[(x * x + t$95$2), $MachinePrecision]), $MachinePrecision] * N[(N[(1.0 - t$95$2), $MachinePrecision] / t$95$1), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]]
\begin{array}{l} t_0 := 1 - x \cdot x\\ t_1 := {t_0}^{1.5} + 1\\ t_2 := \sqrt{t_0}\\ \mathsf{fma}\left(\frac{x \cdot x}{t_1}, {\left(t_2 + 1\right)}^{-1}, \left(1 - \mathsf{fma}\left(x, x, t_2\right)\right) \cdot \frac{1 - t_2}{t_1}\right) \end{array}
Initial program 52.7%
Applied rewrites 100.0%
Final simplification 100.0%
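In real arithmetic this fma form is an exact rearrangement of the original expression: with t_2 = sqrt(1 - x^2) one has x^2 = (1 - t_2)(1 + t_2), 1 - x^2 - t_2 = -t_2(1 - t_2), and t_1 = {t_0}^{1.5} + 1 = t_2^3 + 1 = (1 + t_2)(1 - t_2 + t_2^2), so

\frac{x^2}{t_1 \left(t_2 + 1\right)} + \left(1 - x^2 - t_2\right) \frac{1 - t_2}{t_1} = \frac{1 - t_2}{1 + t_2^3} - \frac{t_2 \left(1 - t_2\right)^2}{1 + t_2^3} = \frac{\left(1 - t_2\right)\left(1 - t_2 + t_2^2\right)}{\left(1 + t_2\right)\left(1 - t_2 + t_2^2\right)} = \frac{1 - t_2}{1 + t_2}.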
(FPCore (x) :precision binary64 (let* ((t_0 (- 1.0 (* x x))) (t_1 (sqrt t_0))) (* (/ (* x x) (+ (pow t_0 1.5) 1.0)) (/ (- (- 2.0 (* x x)) t_1) (+ t_1 1.0)))))
double code(double x) { double t_0 = 1.0 - (x * x); double t_1 = sqrt(t_0); return ((x * x) / (pow(t_0, 1.5) + 1.0)) * (((2.0 - (x * x)) - t_1) / (t_1 + 1.0)); }
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: t_0
    real(8) :: t_1
    t_0 = 1.0d0 - (x * x)
    t_1 = sqrt(t_0)
    code = ((x * x) / ((t_0 ** 1.5d0) + 1.0d0)) * (((2.0d0 - (x * x)) - t_1) / (t_1 + 1.0d0))
end function
public static double code(double x) { double t_0 = 1.0 - (x * x); double t_1 = Math.sqrt(t_0); return ((x * x) / (Math.pow(t_0, 1.5) + 1.0)) * (((2.0 - (x * x)) - t_1) / (t_1 + 1.0)); }
def code(x):
    t_0 = 1.0 - (x * x)
    t_1 = math.sqrt(t_0)
    return ((x * x) / (math.pow(t_0, 1.5) + 1.0)) * (((2.0 - (x * x)) - t_1) / (t_1 + 1.0))
function code(x)
    t_0 = Float64(1.0 - Float64(x * x))
    t_1 = sqrt(t_0)
    return Float64(Float64(Float64(x * x) / Float64((t_0 ^ 1.5) + 1.0)) * Float64(Float64(Float64(2.0 - Float64(x * x)) - t_1) / Float64(t_1 + 1.0)))
end
function tmp = code(x)
    t_0 = 1.0 - (x * x);
    t_1 = sqrt(t_0);
    tmp = ((x * x) / ((t_0 ^ 1.5) + 1.0)) * (((2.0 - (x * x)) - t_1) / (t_1 + 1.0));
end
code[x_] := Block[{t$95$0 = N[(1.0 - N[(x * x), $MachinePrecision]), $MachinePrecision]}, Block[{t$95$1 = N[Sqrt[t$95$0], $MachinePrecision]}, N[(N[(N[(x * x), $MachinePrecision] / N[(N[Power[t$95$0, 1.5], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] * N[(N[(N[(2.0 - N[(x * x), $MachinePrecision]), $MachinePrecision] - t$95$1), $MachinePrecision] / N[(t$95$1 + 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]]]
\begin{array}{l} t_0 := 1 - x \cdot x\\ t_1 := \sqrt{t_0}\\ \frac{x \cdot x}{{t_0}^{1.5} + 1} \cdot \frac{\left(2 - x \cdot x\right) - t_1}{t_1 + 1} \end{array}
Initial program 52.7%
lift-/.f64 (N/A)
lift--.f64 (N/A)
flip-- (N/A)
flip3-+ (N/A)
associate-/r/ (N/A)
associate-/l* (N/A)
lower-*.f64 (N/A)
Applied rewrites 100.0%
Final simplification 100.0%
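This variant is likewise exact in real arithmetic: with t_1 = sqrt(1 - x^2), 2 - x*x = 1 + t_1^2 and {t_0}^{1.5} = t_1^3, so

\frac{x^2}{t_1^3 + 1} \cdot \frac{\left(2 - x^2\right) - t_1}{t_1 + 1} = \frac{\left(1 - t_1\right)\left(1 + t_1\right)}{\left(1 + t_1\right)\left(1 - t_1 + t_1^2\right)} \cdot \frac{1 - t_1 + t_1^2}{t_1 + 1} = \frac{1 - t_1}{1 + t_1}.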
(FPCore (x) :precision binary64 (let* ((t_0 (- -1.0 (sqrt (- 1.0 (* x x)))))) (/ (* x x) (* t_0 t_0))))
double code(double x) { double t_0 = -1.0 - sqrt((1.0 - (x * x))); return (x * x) / (t_0 * t_0); }
real(8) function code(x)
    real(8), intent (in) :: x
    real(8) :: t_0
    t_0 = (-1.0d0) - sqrt((1.0d0 - (x * x)))
    code = (x * x) / (t_0 * t_0)
end function
public static double code(double x) { double t_0 = -1.0 - Math.sqrt((1.0 - (x * x))); return (x * x) / (t_0 * t_0); }
def code(x):
    t_0 = -1.0 - math.sqrt((1.0 - (x * x)))
    return (x * x) / (t_0 * t_0)
function code(x)
    t_0 = Float64(-1.0 - sqrt(Float64(1.0 - Float64(x * x))))
    return Float64(Float64(x * x) / Float64(t_0 * t_0))
end
function tmp = code(x)
    t_0 = -1.0 - sqrt((1.0 - (x * x)));
    tmp = (x * x) / (t_0 * t_0);
end
code[x_] := Block[{t$95$0 = N[(-1.0 - N[Sqrt[N[(1.0 - N[(x * x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]}, N[(N[(x * x), $MachinePrecision] / N[(t$95$0 * t$95$0), $MachinePrecision]), $MachinePrecision]]
\begin{array}{l} t_0 := -1 - \sqrt{1 - x \cdot x}\\ \frac{x \cdot x}{t_0 \cdot t_0} \end{array}
Initial program 52.7%
lift-/.f64 (N/A)
frac-2neg (N/A)
lift--.f64 (N/A)
flip-- (N/A)
lift-+.f64 (N/A)
distribute-neg-frac2 (N/A)
associate-/l/ (N/A)
lower-/.f64 (N/A)
Applied rewrites 100.0%
Final simplification 100.0%
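Squaring removes the sign of the denominator, so this alternative is exactly the x^2 / (1 + sqrt(1 - x^2))^2 form of the identity noted above, with no cancelling subtraction left in the evaluation:

t_0 \cdot t_0 = \left(-1 - \sqrt{1 - x^2}\right)^2 = \left(1 + \sqrt{1 - x^2}\right)^2, \qquad \frac{x \cdot x}{t_0 \cdot t_0} = \frac{x^2}{\left(1 + \sqrt{1 - x^2}\right)^2} = \frac{1 - \sqrt{1 - x^2}}{1 + \sqrt{1 - x^2}}.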
(FPCore (x) :precision binary64 (* (fma (* (* (fma (fma 0.0546875 (* x x) 0.078125) (* x x) 0.125) x) x) x (* 0.25 x)) x))
double code(double x) { return fma(((fma(fma(0.0546875, (x * x), 0.078125), (x * x), 0.125) * x) * x), x, (0.25 * x)) * x; }
function code(x) return Float64(fma(Float64(Float64(fma(fma(0.0546875, Float64(x * x), 0.078125), Float64(x * x), 0.125) * x) * x), x, Float64(0.25 * x)) * x) end
code[x_] := N[(N[(N[(N[(N[(N[(0.0546875 * N[(x * x), $MachinePrecision] + 0.078125), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.125), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision] * x + N[(0.25 * x), $MachinePrecision]), $MachinePrecision] * x), $MachinePrecision]
\begin{array}{l} \\ \mathsf{fma}\left(\left(\mathsf{fma}\left(\mathsf{fma}\left(0.0546875, x \cdot x, 0.078125\right), x \cdot x, 0.125\right) \cdot x\right) \cdot x, x, 0.25 \cdot x\right) \cdot x \end{array}
Initial program 52.7%
Taylor expanded in x around 0
*-commutative (N/A)
unpow2 (N/A)
associate-*r* (N/A)
lower-*.f64 (N/A)
Applied rewrites 99.7%
Applied rewrites 99.7%
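The polynomial alternatives that follow are truncations of the Taylor expansion of the expression around x = 0, whose leading terms are

\frac{1 - \sqrt{1 - x^2}}{1 + \sqrt{1 - x^2}} = \frac{x^2}{4} + \frac{x^4}{8} + \frac{5 x^6}{64} + \frac{7 x^8}{128} + O\!\left(x^{10}\right).

The fma constants are these coefficients written exactly in binary (1/4 = 0.25, 1/8 = 0.125, 5/64 = 0.078125, 7/128 = 0.0546875); the lower-accuracy alternatives below drop trailing terms of the series.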
(FPCore (x) :precision binary64 (* (* (fma (fma (fma 0.0546875 (* x x) 0.078125) (* x x) 0.125) (* x x) 0.25) x) x))
double code(double x) { return (fma(fma(fma(0.0546875, (x * x), 0.078125), (x * x), 0.125), (x * x), 0.25) * x) * x; }
function code(x) return Float64(Float64(fma(fma(fma(0.0546875, Float64(x * x), 0.078125), Float64(x * x), 0.125), Float64(x * x), 0.25) * x) * x) end
code[x_] := N[(N[(N[(N[(N[(0.0546875 * N[(x * x), $MachinePrecision] + 0.078125), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.125), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.25), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision]
\begin{array}{l} \\ \left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(0.0546875, x \cdot x, 0.078125\right), x \cdot x, 0.125\right), x \cdot x, 0.25\right) \cdot x\right) \cdot x \end{array}
Initial program 52.7%
Taylor expanded in x around 0
*-commutative (N/A)
unpow2 (N/A)
associate-*r* (N/A)
lower-*.f64 (N/A)
Applied rewrites 99.7%
(FPCore (x) :precision binary64 (* (* (fma (fma 0.078125 (* x x) 0.125) (* x x) 0.25) x) x))
double code(double x) { return (fma(fma(0.078125, (x * x), 0.125), (x * x), 0.25) * x) * x; }
function code(x) return Float64(Float64(fma(fma(0.078125, Float64(x * x), 0.125), Float64(x * x), 0.25) * x) * x) end
code[x_] := N[(N[(N[(N[(0.078125 * N[(x * x), $MachinePrecision] + 0.125), $MachinePrecision] * N[(x * x), $MachinePrecision] + 0.25), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision]
\begin{array}{l} \\ \left(\mathsf{fma}\left(\mathsf{fma}\left(0.078125, x \cdot x, 0.125\right), x \cdot x, 0.25\right) \cdot x\right) \cdot x \end{array}
Initial program 52.7%
Taylor expanded in x around 0
*-commutative (N/A)
unpow2 (N/A)
associate-*r* (N/A)
lower-*.f64 (N/A)
lower-*.f64 (N/A)
+-commutative (N/A)
*-commutative (N/A)
lower-fma.f64 (N/A)
+-commutative (N/A)
lower-fma.f64 (N/A)
unpow2 (N/A)
lower-*.f64 (N/A)
unpow2 (N/A)
lower-*.f64 (99.6%)
Applied rewrites 99.6%
(FPCore (x) :precision binary64 (* (fma (* 0.125 (* x x)) x (* 0.25 x)) x))
double code(double x) { return fma((0.125 * (x * x)), x, (0.25 * x)) * x; }
function code(x) return Float64(fma(Float64(0.125 * Float64(x * x)), x, Float64(0.25 * x)) * x) end
code[x_] := N[(N[(N[(0.125 * N[(x * x), $MachinePrecision]), $MachinePrecision] * x + N[(0.25 * x), $MachinePrecision]), $MachinePrecision] * x), $MachinePrecision]
\begin{array}{l} \\ \mathsf{fma}\left(0.125 \cdot \left(x \cdot x\right), x, 0.25 \cdot x\right) \cdot x \end{array}
Initial program 52.7%
Taylor expanded in x around 0
*-commutative (N/A)
unpow2 (N/A)
associate-*r* (N/A)
lower-*.f64 (N/A)
lower-*.f64 (N/A)
+-commutative (N/A)
lower-fma.f64 (N/A)
unpow2 (N/A)
lower-*.f64 (99.4%)
Applied rewrites 99.4%
Applied rewrites 99.4%
(FPCore (x) :precision binary64 (* (* (fma 0.125 (* x x) 0.25) x) x))
double code(double x) { return (fma(0.125, (x * x), 0.25) * x) * x; }
function code(x) return Float64(Float64(fma(0.125, Float64(x * x), 0.25) * x) * x) end
code[x_] := N[(N[(N[(0.125 * N[(x * x), $MachinePrecision] + 0.25), $MachinePrecision] * x), $MachinePrecision] * x), $MachinePrecision]
\begin{array}{l} \\ \left(\mathsf{fma}\left(0.125, x \cdot x, 0.25\right) \cdot x\right) \cdot x \end{array}
Initial program 52.7%
Taylor expanded in x around 0
*-commutative (N/A)
unpow2 (N/A)
associate-*r* (N/A)
lower-*.f64 (N/A)
lower-*.f64 (N/A)
+-commutative (N/A)
lower-fma.f64 (N/A)
unpow2 (N/A)
lower-*.f64 (99.4%)
Applied rewrites 99.4%
(FPCore (x) :precision binary64 (* (* 0.25 x) x))
double code(double x) { return (0.25 * x) * x; }
real(8) function code(x)
    real(8), intent (in) :: x
    code = (0.25d0 * x) * x
end function
public static double code(double x) { return (0.25 * x) * x; }
def code(x): return (0.25 * x) * x
function code(x) return Float64(Float64(0.25 * x) * x) end
function tmp = code(x)
    tmp = (0.25 * x) * x;
end
code[x_] := N[(N[(0.25 * x), $MachinePrecision] * x), $MachinePrecision]
\begin{array}{l} \\ \left(0.25 \cdot x\right) \cdot x \end{array}
Initial program 52.7%
Taylor expanded in x around 0
*-commutative (N/A)
unpow2 (N/A)
associate-*r* (N/A)
lower-*.f64 (N/A)
lower-*.f64 (N/A)
+-commutative (N/A)
lower-fma.f64 (N/A)
unpow2 (N/A)
lower-*.f64 (99.4%)
Applied rewrites 99.4%
Taylor expanded in x around 0
Applied rewrites 98.9%
(FPCore (x) :precision binary64 (* (* x x) 0.25))
double code(double x) { return (x * x) * 0.25; }
real(8) function code(x)
    real(8), intent (in) :: x
    code = (x * x) * 0.25d0
end function
public static double code(double x) { return (x * x) * 0.25; }
def code(x): return (x * x) * 0.25
function code(x) return Float64(Float64(x * x) * 0.25) end
function tmp = code(x)
    tmp = (x * x) * 0.25;
end
code[x_] := N[(N[(x * x), $MachinePrecision] * 0.25), $MachinePrecision]
\begin{array}{l} \\ \left(x \cdot x\right) \cdot 0.25 \end{array}
Initial program 52.7%
Taylor expanded in x around 0
*-commutative (N/A)
lower-*.f64 (N/A)
unpow2 (N/A)
lower-*.f64 (98.9%)
Applied rewrites 98.9%
(FPCore (x) :precision binary64 1.0)
double code(double x) { return 1.0; }
real(8) function code(x)
    real(8), intent (in) :: x
    code = 1.0d0
end function
public static double code(double x) { return 1.0; }
def code(x): return 1.0
function code(x) return 1.0 end
function tmp = code(x)
    tmp = 1.0;
end
code[x_] := 1.0
\begin{array}{l} \\ 1 \end{array}
Initial program 52.7%
lift-/.f64 (N/A)
lift--.f64 (N/A)
flip-- (N/A)
flip3-+ (N/A)
associate-/r/ (N/A)
associate-/l* (N/A)
lower-*.f64 (N/A)
Applied rewrites 100.0%
Taylor expanded in x around inf
unpow2 (N/A)
rem-square-sqrt (N/A)
metadata-eval (4.3%)
Applied rewrites 4.3%
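This constant is the value of the expression at the upper end of the precondition range:

\left.\frac{1 - \sqrt{1 - x^2}}{1 + \sqrt{1 - x^2}}\right|_{x = 1} = \frac{1 - 0}{1 + 0} = 1,

so it is accurate only for inputs near x = 1, consistent with the 4.3% figure above.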
herbie shell --seed 1
(FPCore (x)
:name "(1-sqrt(1-x*x))/(1+sqrt(1-x*x))"
:precision binary64
:pre (and (<= 0.0 x) (<= x 1.0))
(/ (- 1.0 (sqrt (- 1.0 (* x x)))) (+ 1.0 (sqrt (- 1.0 (* x x))))))
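A small standalone C harness (not part of the Herbie output; the function and variable names are illustrative only) shows the cancellation that the rewrites avoid, comparing the original expression with the x^2 / (1 + sqrt(1 - x^2))^2 alternative over a few inputs in [0, 1]:

#include <math.h>
#include <stdio.h>

/* Original form: 1 - sqrt(1 - x*x) cancels when x is small. */
static double original(double x) {
    double t = sqrt(1.0 - x * x);
    return (1.0 - t) / (1.0 + t);
}

/* Rearranged form from the report: x^2 / (1 + sqrt(1 - x^2))^2. */
static double rewritten(double x) {
    double t = -1.0 - sqrt(1.0 - x * x);
    return (x * x) / (t * t);
}

int main(void) {
    /* A few points inside the precondition 0 <= x <= 1. */
    const double xs[] = { 0.999, 0.5, 1e-3, 1e-6, 1e-9 };
    for (int i = 0; i < 5; i++) {
        printf("x = %-8g  original = %.17g  rewritten = %.17g\n",
               xs[i], original(xs[i]), rewritten(xs[i]));
    }
    return 0;
}

For x around 1e-9 the original collapses to 0, because 1.0 - x*x rounds to 1.0, while the rewritten form still returns roughly x*x / 4.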