Initial program:

(FPCore (x) :precision binary64 (- (* 2.0 (/ x (- (pow 2.0 10.0) 1.0))) 1.0))

double code(double x) { return (2.0 * (x / (pow(2.0, 10.0) - 1.0))) - 1.0; }

real(8) function code(x)
    real(8), intent (in) :: x
    code = (2.0d0 * (x / ((2.0d0 ** 10.0d0) - 1.0d0))) - 1.0d0
end function

public static double code(double x) { return (2.0 * (x / (Math.pow(2.0, 10.0) - 1.0))) - 1.0; }

def code(x): return (2.0 * (x / (math.pow(2.0, 10.0) - 1.0))) - 1.0

function code(x)
    return Float64(Float64(2.0 * Float64(x / Float64((2.0 ^ 10.0) - 1.0))) - 1.0)
end

function tmp = code(x)
    tmp = (2.0 * (x / ((2.0 ^ 10.0) - 1.0))) - 1.0;
end

code[x_] := N[(N[(2.0 * N[(x / N[(N[Power[2.0, 10.0], $MachinePrecision] - 1.0), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] - 1.0), $MachinePrecision]

\begin{array}{l} 2 \cdot \frac{x}{{2}^{10} - 1} - 1 \end{array}
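Since the denominator 2^10 - 1 is a constant, the expression is just an affine map of x. The arithmetic below (not part of the Herbie output) folds that constant and, using the precondition 0 <= x <= 1023 from the reproduction block at the end, shows the intended output range:

\[
2^{10} - 1 = 1023, \qquad
2 \cdot \frac{x}{1023} - 1 = \frac{2}{1023}\,x - 1, \qquad
\frac{2}{1023} \approx 0.0019550342130987292 .
\]
\[
0 \le x \le 1023 \;\Longrightarrow\; -1 \le \frac{2}{1023}\,x - 1 \le 1 .
\]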
Sampling outcomes in binary64 precision:
Herbie found 2 alternatives:
Alternative | Accuracy | Speedup
---|---|---
Alternative 1 | 100.0% |
Alternative 2 | 97.4% |
Alternative 1:

(FPCore (x) :precision binary64 (fma x 0.0019550342130987292 -1.0))

double code(double x) { return fma(x, 0.0019550342130987292, -1.0); }

function code(x)
    return fma(x, 0.0019550342130987292, -1.0)
end

code[x_] := N[(x * 0.0019550342130987292 + -1.0), $MachinePrecision]

\begin{array}{l} \mathsf{fma}\left(x, 0.0019550342130987292, -1\right) \end{array}
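As a quick sanity check (not part of the Herbie output), the C sketch below compares the original expression with the fma rewrite over the precondition range 0 <= x <= 1023; the two should agree to near machine precision, since the rewrite only folds the constant 2/1023 and fuses the final multiply and subtract into a single rounding.

#include <math.h>
#include <stdio.h>

/* Original expression: 2.0 * (x / (2^10 - 1)) - 1.0 */
static double original(double x) {
    return (2.0 * (x / (pow(2.0, 10.0) - 1.0))) - 1.0;
}

/* Alternative 1: one fused multiply-add with the folded constant 2/1023. */
static double alternative1(double x) {
    return fma(x, 0.0019550342130987292, -1.0);
}

int main(void) {
    /* Spot-check a few points spanning the sampled range [0, 1023]. */
    for (double x = 0.0; x <= 1023.0; x += 127.875) {
        printf("x = %8.3f: original = %+.17g, fma = %+.17g\n",
               x, original(x), alternative1(x));
    }
    return 0;
}

(Compile with something like cc demo.c -lm; the file name is arbitrary.)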
Derivation:

Step | Accuracy
---|---
Initial program | 100.0%
lift--.f64 | N/A
sub-neg | N/A
lift-*.f64 | N/A
*-commutative | N/A
lift-/.f64 | N/A
div-inv | N/A
associate-*l* | N/A
lower-fma.f64 | N/A
lift--.f64 | N/A
lift-pow.f64 | N/A
metadata-eval | N/A
metadata-eval | N/A
metadata-eval | N/A
metadata-eval | N/A
metadata-eval | 100.0%
Applied rewrites | 100.0%
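Read informally, the rewrite chain above corresponds to roughly the following algebra: sub-neg turns the subtraction into adding -1, div-inv replaces the division by the constant with multiplication by its reciprocal, the commutativity and associativity rules regroup the product, metadata-eval folds the constants, and lower-fma.f64 fuses the final multiply-add.

\[
2 \cdot \frac{x}{1023} - 1
\;=\; 2 \cdot \left(x \cdot \tfrac{1}{1023}\right) + (-1)
\;=\; x \cdot \tfrac{2}{1023} + (-1)
\;=\; \mathsf{fma}\!\left(x,\ \tfrac{2}{1023},\ -1\right),
\]
where 2/1023 rounds to the double 0.0019550342130987292 that appears in the final program.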
Alternative 2:

(FPCore (x) :precision binary64 -1.0)

double code(double x) { return -1.0; }

real(8) function code(x)
    real(8), intent (in) :: x
    code = -1.0d0
end function

public static double code(double x) { return -1.0; }

def code(x): return -1.0

function code(x)
    return -1.0
end

function tmp = code(x)
    tmp = -1.0;
end

code[x_] := -1.0

\begin{array}{l} -1 \end{array}
Derivation:

Step | Accuracy
---|---
Initial program | 100.0%
Taylor expanded in x around 0 |
Applied rewrites | 97.4%
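This alternative comes from a series expansion rather than an exact rewrite: expanding the expression around x = 0 and keeping only the constant term drops the linear part, which is presumably why the reported accuracy falls to 97.4% (the approximation is only good when x is small relative to 1023).

\[
2 \cdot \frac{x}{1023} - 1 \;=\; -1 + \frac{2}{1023}\,x \;\approx\; -1
\quad \text{for } x \text{ near } 0 .
\]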
Reproduce:

herbie shell --seed 1
(FPCore (x)
:name "2.0*(x/(2^10-1))-1.0"
:precision binary64
:pre (and (<= 0.0 x) (<= x 1023.0))
(- (* 2.0 (/ x (- (pow 2.0 10.0) 1.0))) 1.0))
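The FPCore above restates the input with its :name and the sampling precondition 0 <= x <= 1023. Assuming the herbie binary is on your PATH, the command above starts an interactive shell that reads FPCore expressions; entering this FPCore there, with the same --seed 1, should replay this run.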