; Original expression: (sqrt(1 + 8*ij) - 1) / 2, evaluated in binary64.
(FPCore (ij) :precision binary64 (/ (- (sqrt (+ 1.0 (* 8.0 ij))) 1.0) 2.0))
double code(double ij) { return (sqrt((1.0 + (8.0 * ij))) - 1.0) / 2.0; }
! Evaluates (sqrt(1 + 8*ij) - 1) / 2 in double precision.
real(8) function code(ij)
    real(8), intent (in) :: ij
    real(8) :: root
    root = sqrt(1.0d0 + (8.0d0 * ij))
    code = (root - 1.0d0) / 2.0d0
end function
/** Evaluates (sqrt(1 + 8*ij) - 1) / 2 in double precision. */
public static double code(double ij) {
    final double root = Math.sqrt(1.0 + (8.0 * ij));
    return (root - 1.0) / 2.0;
}
def code(ij):
    """Return (sqrt(1 + 8*ij) - 1) / 2 evaluated in float arithmetic."""
    root = math.sqrt(1.0 + (8.0 * ij))
    return (root - 1.0) / 2.0
# Evaluates (sqrt(1 + 8*ij) - 1) / 2, forcing Float64 rounding at each step
# exactly as the generated one-liner did.
function code(ij)
    t = Float64(8.0 * ij)
    root = sqrt(Float64(1.0 + t))
    return Float64(Float64(root - 1.0) / 2.0)
end
% Evaluates (sqrt(1 + 8*ij) - 1) / 2.
function tmp = code(ij)
    root = sqrt(1.0 + (8.0 * ij));
    tmp = (root - 1.0) / 2.0;
end
(* (sqrt(1 + 8*ij) - 1) / 2, with every intermediate rounded via N[..., $MachinePrecision]. *)
code[ij_] := N[(N[(N[Sqrt[N[(1.0 + N[(8.0 * ij), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - 1.0), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l} \\ \frac{\sqrt{1 + 8 \cdot ij} - 1}{2} \end{array}
Sampling outcomes in binary64 precision:
Herbie found 8 alternatives:
| Alternative | Accuracy | Speedup |
|---|---|---|
; Alternative 1 (unchanged): (sqrt(1 + 8*ij) - 1) / 2 in binary64.
(FPCore (ij) :precision binary64 (/ (- (sqrt (+ 1.0 (* 8.0 ij))) 1.0) 2.0))
double code(double ij) { return (sqrt((1.0 + (8.0 * ij))) - 1.0) / 2.0; }
! Evaluates (sqrt(1 + 8*ij) - 1) / 2 in double precision.
real(8) function code(ij)
    real(8), intent (in) :: ij
    real(8) :: root
    root = sqrt(1.0d0 + (8.0d0 * ij))
    code = (root - 1.0d0) / 2.0d0
end function
/** Evaluates (sqrt(1 + 8*ij) - 1) / 2 in double precision. */
public static double code(double ij) {
    final double root = Math.sqrt(1.0 + (8.0 * ij));
    return (root - 1.0) / 2.0;
}
def code(ij):
    """Return (sqrt(1 + 8*ij) - 1) / 2 evaluated in float arithmetic."""
    root = math.sqrt(1.0 + (8.0 * ij))
    return (root - 1.0) / 2.0
# Evaluates (sqrt(1 + 8*ij) - 1) / 2, forcing Float64 rounding at each step
# exactly as the generated one-liner did.
function code(ij)
    t = Float64(8.0 * ij)
    root = sqrt(Float64(1.0 + t))
    return Float64(Float64(root - 1.0) / 2.0)
end
% Evaluates (sqrt(1 + 8*ij) - 1) / 2.
function tmp = code(ij)
    root = sqrt(1.0 + (8.0 * ij));
    tmp = (root - 1.0) / 2.0;
end
(* (sqrt(1 + 8*ij) - 1) / 2, with every intermediate rounded via N[..., $MachinePrecision]. *)
code[ij_] := N[(N[(N[Sqrt[N[(1.0 + N[(8.0 * ij), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] - 1.0), $MachinePrecision] / 2.0), $MachinePrecision]
\begin{array}{l} \\ \frac{\sqrt{1 + 8 \cdot ij} - 1}{2} \end{array}
; Algebraic rearrangement of (sqrt(1 + 8*ij) - 1) / 2 as
; 4 * ij / (sqrt(fma(8, ij, 1)) + 1), which removes the subtraction.
(FPCore (ij) :precision binary64 (* (/ ij (+ (sqrt (fma 8.0 ij 1.0)) 1.0)) 4.0))
double code(double ij) { return (ij / (sqrt(fma(8.0, ij, 1.0)) + 1.0)) * 4.0; }
# 4 * ij / (sqrt(8*ij + 1) + 1), with Float64 rounding forced at each step.
function code(ij)
    denom = Float64(sqrt(fma(8.0, ij, 1.0)) + 1.0)
    ratio = Float64(ij / denom)
    return Float64(ratio * 4.0)
end
(* 4 * ij / (sqrt(8*ij + 1) + 1) with machine-precision rounding at each step. *)
code[ij_] := N[(N[(ij / N[(N[Sqrt[N[(8.0 * ij + 1.0), $MachinePrecision]], $MachinePrecision] + 1.0), $MachinePrecision]), $MachinePrecision] * 4.0), $MachinePrecision]
\begin{array}{l} \\ \frac{ij}{\sqrt{\mathsf{fma}\left(8, ij, 1\right)} + 1} \cdot 4 \end{array}
Initial program 14.1%
lift-/.f64
N/A
lift--.f64
N/A
flip--
N/A
associate-/l/
N/A
lower-/.f64
N/A
lift-sqrt.f64
N/A
lift-sqrt.f64
N/A
rem-square-sqrt
N/A
lift-+.f64
N/A
+-commutative
N/A
metadata-eval
N/A
associate--l+
N/A
metadata-eval
N/A
lift-*.f64
N/A
*-commutative
N/A
lower-fma.f64
N/A
distribute-lft-in
N/A
metadata-eval
N/A
lower-fma.f64
100.0
Applied rewrites 100.0%
lift-fma.f64
N/A
+-rgt-identity
N/A
lower-/.f64
N/A
lift-fma.f64
N/A
*-commutative
N/A
lift-sqrt.f64
N/A
lift-fma.f64
N/A
*-commutative
N/A
+-commutative
N/A
distribute-lft1-in
N/A
times-frac
N/A
metadata-eval
N/A
metadata-eval
N/A
lower-*.f64
N/A
Applied rewrites 100.0%
; Piecewise form: cubic Taylor polynomial 2ij - 4ij^2 + 16ij^3 (Horner/fma)
; when 8*ij <= 0.0005, otherwise the fma/sqrt rearrangement.
(FPCore (ij) :precision binary64 (if (<= (* 8.0 ij) 0.0005) (* (fma (fma (fma -80.0 ij 16.0) ij -4.0) ij 2.0) ij) (fma (sqrt (fma ij 8.0 1.0)) 0.5 -0.5)))
double code(double ij) { double tmp; if ((8.0 * ij) <= 0.0005) { tmp = fma(fma(fma(-80.0, ij, 16.0), ij, -4.0), ij, 2.0) * ij; } else { tmp = fma(sqrt(fma(ij, 8.0, 1.0)), 0.5, -0.5); } return tmp; }
# Piecewise evaluation: Horner/fma cubic polynomial when 8*ij <= 0.0005,
# otherwise the fma/sqrt rearrangement.
function code(ij)
    if Float64(8.0 * ij) <= 0.0005
        poly = fma(fma(fma(-80.0, ij, 16.0), ij, -4.0), ij, 2.0)
        return Float64(poly * ij)
    else
        return fma(sqrt(fma(ij, 8.0, 1.0)), 0.5, -0.5)
    end
end
(* Piecewise: Horner cubic polynomial when 8*ij <= 0.0005, else the sqrt form. *)
code[ij_] := If[LessEqual[N[(8.0 * ij), $MachinePrecision], 0.0005], N[(N[(N[(N[(-80.0 * ij + 16.0), $MachinePrecision] * ij + -4.0), $MachinePrecision] * ij + 2.0), $MachinePrecision] * ij), $MachinePrecision], N[(N[Sqrt[N[(ij * 8.0 + 1.0), $MachinePrecision]], $MachinePrecision] * 0.5 + -0.5), $MachinePrecision]]
\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;8 \cdot ij \leq 0.0005:\\ \;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-80, ij, 16\right), ij, -4\right), ij, 2\right) \cdot ij\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(\sqrt{\mathsf{fma}\left(ij, 8, 1\right)}, 0.5, -0.5\right)\\ \end{array} \end{array}
if (*.f64 #s(literal 8 binary64) ij) < 5.0000000000000001e-4
Initial program 8.5%
Taylor expanded in ij around 0
*-commutative
N/A
lower-*.f64
N/A
+-commutative
N/A
*-commutative
N/A
lower-fma.f64
N/A
sub-neg
N/A
*-commutative
N/A
metadata-eval
N/A
lower-fma.f64
N/A
+-commutative
N/A
lower-fma.f64
100.0
Applied rewrites 100.0%
if 5.0000000000000001e-4 < (*.f64 #s(literal 8 binary64) ij)
Initial program 98.7%
lift-/.f64
N/A
lift--.f64
N/A
div-sub
N/A
metadata-eval
N/A
sub-neg
N/A
metadata-eval
N/A
metadata-eval
N/A
metadata-eval
N/A
div-inv
N/A
metadata-eval
N/A
lower-fma.f64
N/A
lift-+.f64
N/A
+-commutative
N/A
lift-*.f64
N/A
*-commutative
N/A
lower-fma.f64
N/A
metadata-eval
N/A
metadata-eval
98.7
Applied rewrites 98.7%
; Piecewise form with a tighter threshold: cubic polynomial 2ij - 4ij^2 + 16ij^3
; when 8*ij <= 0.0001, otherwise the fma/sqrt rearrangement.
(FPCore (ij) :precision binary64 (if (<= (* 8.0 ij) 0.0001) (fma (* (fma 16.0 ij -4.0) ij) ij (* 2.0 ij)) (fma (sqrt (fma ij 8.0 1.0)) 0.5 -0.5)))
double code(double ij) { double tmp; if ((8.0 * ij) <= 0.0001) { tmp = fma((fma(16.0, ij, -4.0) * ij), ij, (2.0 * ij)); } else { tmp = fma(sqrt(fma(ij, 8.0, 1.0)), 0.5, -0.5); } return tmp; }
# Piecewise evaluation: fma-based cubic polynomial when 8*ij <= 0.0001,
# otherwise the fma/sqrt rearrangement.
function code(ij)
    if Float64(8.0 * ij) <= 0.0001
        inner = Float64(fma(16.0, ij, -4.0) * ij)
        return fma(inner, ij, Float64(2.0 * ij))
    else
        return fma(sqrt(fma(ij, 8.0, 1.0)), 0.5, -0.5)
    end
end
(* Piecewise: cubic polynomial when 8*ij <= 0.0001, else the sqrt form. *)
code[ij_] := If[LessEqual[N[(8.0 * ij), $MachinePrecision], 0.0001], N[(N[(N[(16.0 * ij + -4.0), $MachinePrecision] * ij), $MachinePrecision] * ij + N[(2.0 * ij), $MachinePrecision]), $MachinePrecision], N[(N[Sqrt[N[(ij * 8.0 + 1.0), $MachinePrecision]], $MachinePrecision] * 0.5 + -0.5), $MachinePrecision]]
\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;8 \cdot ij \leq 0.0001:\\ \;\;\;\;\mathsf{fma}\left(\mathsf{fma}\left(16, ij, -4\right) \cdot ij, ij, 2 \cdot ij\right)\\ \mathbf{else}:\\ \;\;\;\;\mathsf{fma}\left(\sqrt{\mathsf{fma}\left(ij, 8, 1\right)}, 0.5, -0.5\right)\\ \end{array} \end{array}
if (*.f64 #s(literal 8 binary64) ij) < 1.00000000000000005e-4
Initial program 7.9%
Taylor expanded in ij around 0
*-commutative
N/A
lower-*.f64
N/A
+-commutative
N/A
*-commutative
N/A
lower-fma.f64
N/A
sub-neg
N/A
metadata-eval
N/A
lower-fma.f64
100.0
Applied rewrites 100.0%
Applied rewrites 100.0%
if 1.00000000000000005e-4 < (*.f64 #s(literal 8 binary64) ij)
Initial program 96.8%
lift-/.f64
N/A
lift--.f64
N/A
div-sub
N/A
metadata-eval
N/A
sub-neg
N/A
metadata-eval
N/A
metadata-eval
N/A
metadata-eval
N/A
div-inv
N/A
metadata-eval
N/A
lower-fma.f64
N/A
lift-+.f64
N/A
+-commutative
N/A
lift-*.f64
N/A
*-commutative
N/A
lower-fma.f64
N/A
metadata-eval
N/A
metadata-eval
96.8
Applied rewrites 96.8%
; Cubic polynomial approximation 2ij - 4ij^2 + 16ij^3, evaluated with fma.
(FPCore (ij) :precision binary64 (fma (* (fma 16.0 ij -4.0) ij) ij (* 2.0 ij)))
double code(double ij) { return fma((fma(16.0, ij, -4.0) * ij), ij, (2.0 * ij)); }
# Cubic polynomial 2*ij - 4*ij^2 + 16*ij^3 via fused multiply-adds.
function code(ij)
    inner = Float64(fma(16.0, ij, -4.0) * ij)
    return fma(inner, ij, Float64(2.0 * ij))
end
(* Cubic polynomial 2*ij - 4*ij^2 + 16*ij^3 with machine-precision rounding. *)
code[ij_] := N[(N[(N[(16.0 * ij + -4.0), $MachinePrecision] * ij), $MachinePrecision] * ij + N[(2.0 * ij), $MachinePrecision]), $MachinePrecision]
\begin{array}{l} \\ \mathsf{fma}\left(\mathsf{fma}\left(16, ij, -4\right) \cdot ij, ij, 2 \cdot ij\right) \end{array}
Initial program 14.1%
Taylor expanded in ij around 0
*-commutative
N/A
lower-*.f64
N/A
+-commutative
N/A
*-commutative
N/A
lower-fma.f64
N/A
sub-neg
N/A
metadata-eval
N/A
lower-fma.f64
94.6
Applied rewrites 94.6%
Applied rewrites 94.6%
; Cubic polynomial 2ij - 4ij^2 + 16ij^3 in fully factored Horner/fma form.
(FPCore (ij) :precision binary64 (* (fma (fma 16.0 ij -4.0) ij 2.0) ij))
double code(double ij) { return fma(fma(16.0, ij, -4.0), ij, 2.0) * ij; }
# Cubic polynomial 2*ij - 4*ij^2 + 16*ij^3 in Horner/fma form.
function code(ij)
    quadratic = fma(fma(16.0, ij, -4.0), ij, 2.0)
    return Float64(quadratic * ij)
end
(* Cubic polynomial in Horner form with machine-precision rounding. *)
code[ij_] := N[(N[(N[(16.0 * ij + -4.0), $MachinePrecision] * ij + 2.0), $MachinePrecision] * ij), $MachinePrecision]
\begin{array}{l} \\ \mathsf{fma}\left(\mathsf{fma}\left(16, ij, -4\right), ij, 2\right) \cdot ij \end{array}
Initial program 14.1%
Taylor expanded in ij around 0
*-commutative
N/A
lower-*.f64
N/A
+-commutative
N/A
*-commutative
N/A
lower-fma.f64
N/A
sub-neg
N/A
metadata-eval
N/A
lower-fma.f64
94.6
Applied rewrites 94.6%
; Quadratic polynomial approximation 2ij - 4ij^2, evaluated with fma.
(FPCore (ij) :precision binary64 (fma (* -4.0 ij) ij (* 2.0 ij)))
double code(double ij) { return fma((-4.0 * ij), ij, (2.0 * ij)); }
# Quadratic polynomial 2*ij - 4*ij^2 via a fused multiply-add.
function code(ij)
    a = Float64(-4.0 * ij)
    b = Float64(2.0 * ij)
    return fma(a, ij, b)
end
(* Quadratic polynomial 2*ij - 4*ij^2 with machine-precision rounding. *)
code[ij_] := N[(N[(-4.0 * ij), $MachinePrecision] * ij + N[(2.0 * ij), $MachinePrecision]), $MachinePrecision]
\begin{array}{l} \\ \mathsf{fma}\left(-4 \cdot ij, ij, 2 \cdot ij\right) \end{array}
Initial program 14.1%
Taylor expanded in ij around 0
*-commutative
N/A
lower-*.f64
N/A
+-commutative
N/A
lower-fma.f64
93.7
Applied rewrites 93.7%
Applied rewrites 93.7%
; Quadratic polynomial 2ij - 4ij^2 in factored Horner/fma form.
(FPCore (ij) :precision binary64 (* (fma -4.0 ij 2.0) ij))
double code(double ij) { return fma(-4.0, ij, 2.0) * ij; }
# Quadratic polynomial 2*ij - 4*ij^2 in factored Horner/fma form.
function code(ij)
    slope = fma(-4.0, ij, 2.0)
    return Float64(slope * ij)
end
(* Quadratic polynomial in factored form with machine-precision rounding. *)
code[ij_] := N[(N[(-4.0 * ij + 2.0), $MachinePrecision] * ij), $MachinePrecision]
\begin{array}{l} \\ \mathsf{fma}\left(-4, ij, 2\right) \cdot ij \end{array}
Initial program 14.1%
Taylor expanded in ij around 0
*-commutative
N/A
lower-*.f64
N/A
+-commutative
N/A
lower-fma.f64
93.7
Applied rewrites 93.7%
; Linear approximation 2*ij (first-order Taylor term around ij = 0).
(FPCore (ij) :precision binary64 (* 2.0 ij))
/* Linear approximation: returns 2 * ij. */
double code(double ij) {
    double doubled = 2.0 * ij;
    return doubled;
}
! Linear approximation: returns 2 * ij in double precision.
real(8) function code(ij)
    real(8), intent (in) :: ij
    code = 2.0d0 * ij
end function
/** Linear approximation: returns 2 * ij. */
public static double code(double ij) {
    final double doubled = 2.0 * ij;
    return doubled;
}
def code(ij):
    """Return 2.0 * ij (linear approximation of the original expression)."""
    doubled = 2.0 * ij
    return doubled
# Linear approximation: returns 2 * ij as a Float64.
function code(ij)
    doubled = 2.0 * ij
    return Float64(doubled)
end
% Linear approximation: returns 2 * ij.
function tmp = code(ij)
    tmp = 2.0 * ij;
end
(* Linear approximation: 2 * ij with machine-precision rounding. *)
code[ij_] := N[(2.0 * ij), $MachinePrecision]
\begin{array}{l} \\ 2 \cdot ij \end{array}
Initial program 14.1%
Taylor expanded in ij around 0
lower-*.f64
92.8
Applied rewrites 92.8%
herbie shell --seed 1
; Herbie input program: (sqrt(1 + 8*ij) - 1) / 2 in binary64,
; sampled under the precondition 0 <= ij <= 1e15.
(FPCore (ij)
:name "(sqrt(1 + 8 * ij) - 1) / 2"
:precision binary64
:pre (and (<= 0.0 ij) (<= ij 1e+15))
(/ (- (sqrt (+ 1.0 (* 8.0 ij))) 1.0) 2.0))