(FPCore (x) :precision binary64 (let* ((t_0 (/ (+ x 1.0) 2.0))) (/ (sqrt (- (pow t_0 2.0) (pow (sqrt x) 2.0))) (acosh (/ t_0 (sqrt x))))))
double code(double x) { double t_0 = (x + 1.0) / 2.0; return sqrt((pow(t_0, 2.0) - pow(sqrt(x), 2.0))) / acosh((t_0 / sqrt(x))); }
import math

def code(x):
    t_0 = (x + 1.0) / 2.0
    return math.sqrt(math.pow(t_0, 2.0) - math.pow(math.sqrt(x), 2.0)) / math.acosh(t_0 / math.sqrt(x))
function code(x)
    t_0 = Float64(Float64(x + 1.0) / 2.0)
    return Float64(sqrt(Float64((t_0 ^ 2.0) - (sqrt(x) ^ 2.0))) / acosh(Float64(t_0 / sqrt(x))))
end
function tmp = code(x)
    t_0 = (x + 1.0) / 2.0;
    tmp = sqrt((t_0 ^ 2.0) - (sqrt(x) ^ 2.0)) / acosh(t_0 / sqrt(x));
end
code[x_] := Block[{t$95$0 = N[(N[(x + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]}, N[(N[Sqrt[N[(N[Power[t$95$0, 2.0], $MachinePrecision] - N[Power[N[Sqrt[x], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / N[ArcCosh[N[(t$95$0 / N[Sqrt[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l} \\ \begin{array}{l} t_0 := \frac{x + 1}{2}\\ \frac{\sqrt{{t_0}^{2} - {\left(\sqrt{x}\right)}^{2}}}{\cosh^{-1} \left(\frac{t_0}{\sqrt{x}}\right)} \end{array} \end{array}
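Mathematically, for x ≥ 0 the quantity under the square root in the numerator simplifies exactly; several of the rewrites below (difference-of-squares, the Taylor expansions) arrive at forms equivalent to this identity:

{\left(\frac{x + 1}{2}\right)}^{2} - {\left(\sqrt{x}\right)}^{2} = \frac{x^{2} + 2x + 1}{4} - x = {\left(\frac{x - 1}{2}\right)}^{2}

This is why the simplest alternatives below can replace the whole numerator with fma(0.5, x, -0.5), i.e. (x - 1)/2.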
Sampling outcomes in binary64 precision.
Herbie found 11 alternatives:
(FPCore (x) :precision binary64 (let* ((t_0 (/ (+ x 1.0) 2.0))) (/ (sqrt (- (pow t_0 2.0) (pow (sqrt x) 2.0))) (acosh (/ t_0 (sqrt x))))))
double code(double x) { double t_0 = (x + 1.0) / 2.0; return sqrt((pow(t_0, 2.0) - pow(sqrt(x), 2.0))) / acosh((t_0 / sqrt(x))); }
import math

def code(x):
    t_0 = (x + 1.0) / 2.0
    return math.sqrt(math.pow(t_0, 2.0) - math.pow(math.sqrt(x), 2.0)) / math.acosh(t_0 / math.sqrt(x))
function code(x)
    t_0 = Float64(Float64(x + 1.0) / 2.0)
    return Float64(sqrt(Float64((t_0 ^ 2.0) - (sqrt(x) ^ 2.0))) / acosh(Float64(t_0 / sqrt(x))))
end
function tmp = code(x)
    t_0 = (x + 1.0) / 2.0;
    tmp = sqrt((t_0 ^ 2.0) - (sqrt(x) ^ 2.0)) / acosh(t_0 / sqrt(x));
end
code[x_] := Block[{t$95$0 = N[(N[(x + 1.0), $MachinePrecision] / 2.0), $MachinePrecision]}, N[(N[Sqrt[N[(N[Power[t$95$0, 2.0], $MachinePrecision] - N[Power[N[Sqrt[x], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / N[ArcCosh[N[(t$95$0 / N[Sqrt[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l} \\ \begin{array}{l} t_0 := \frac{x + 1}{2}\\ \frac{\sqrt{{t_0}^{2} - {\left(\sqrt{x}\right)}^{2}}}{\cosh^{-1} \left(\frac{t_0}{\sqrt{x}}\right)} \end{array} \end{array}
(FPCore (x) :precision binary64 (/ (sqrt (- (pow (/ (+ x 1.0) 2.0) 2.0) (pow (sqrt x) 2.0))) (log (fma (/ 0.5 (sqrt x)) (+ 1.0 x) (sqrt (fma (pow (+ 1.0 x) 2.0) (pow (* 2.0 (sqrt x)) -2.0) -1.0))))))
double code(double x) { return sqrt((pow(((x + 1.0) / 2.0), 2.0) - pow(sqrt(x), 2.0))) / log(fma((0.5 / sqrt(x)), (1.0 + x), sqrt(fma(pow((1.0 + x), 2.0), pow((2.0 * sqrt(x)), -2.0), -1.0)))); }
function code(x) return Float64(sqrt(Float64((Float64(Float64(x + 1.0) / 2.0) ^ 2.0) - (sqrt(x) ^ 2.0))) / log(fma(Float64(0.5 / sqrt(x)), Float64(1.0 + x), sqrt(fma((Float64(1.0 + x) ^ 2.0), (Float64(2.0 * sqrt(x)) ^ -2.0), -1.0))))) end
code[x_] := N[(N[Sqrt[N[(N[Power[N[(N[(x + 1.0), $MachinePrecision] / 2.0), $MachinePrecision], 2.0], $MachinePrecision] - N[Power[N[Sqrt[x], $MachinePrecision], 2.0], $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / N[Log[N[(N[(0.5 / N[Sqrt[x], $MachinePrecision]), $MachinePrecision] * N[(1.0 + x), $MachinePrecision] + N[Sqrt[N[(N[Power[N[(1.0 + x), $MachinePrecision], 2.0], $MachinePrecision] * N[Power[N[(2.0 * N[Sqrt[x], $MachinePrecision]), $MachinePrecision], -2.0], $MachinePrecision] + -1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l} \\ \frac{\sqrt{{\left(\frac{x + 1}{2}\right)}^{2} - {\left(\sqrt{x}\right)}^{2}}}{\log \left(\mathsf{fma}\left(\frac{0.5}{\sqrt{x}}, 1 + x, \sqrt{\mathsf{fma}\left({\left(1 + x\right)}^{2}, {\left(2 \cdot \sqrt{x}\right)}^{-2}, -1\right)}\right)\right)} \end{array}
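The log in this alternative's denominator replaces acosh via its logarithmic definition (the acosh-def step in the derivation below):

\cosh^{-1}(y) = \log\left(y + \sqrt{y^{2} - 1}\right), \quad y \ge 1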
Initial program: 97.2%
Applied rewrites (97.6%): lift-acosh.f64, acosh-def, lower-log.f64, lift-/.f64, lift-/.f64, div-inv, metadata-eval, associate-/l*, *-commutative, lower-fma.f64, lower-/.f64, lift-+.f64, +-commutative, lower-+.f64, lower-sqrt.f64
Applied rewrites: 97.9%
(FPCore (x) :precision binary64 (let* ((t_0 (pow (fma 0.5 x 0.5) 2.0))) (/ (sqrt (- t_0 x)) (log (fma (/ 0.5 (sqrt x)) (+ 1.0 x) (sqrt (- (/ t_0 x) 1.0)))))))
double code(double x) { double t_0 = pow(fma(0.5, x, 0.5), 2.0); return sqrt((t_0 - x)) / log(fma((0.5 / sqrt(x)), (1.0 + x), sqrt(((t_0 / x) - 1.0)))); }
function code(x)
    t_0 = fma(0.5, x, 0.5) ^ 2.0
    return Float64(sqrt(Float64(t_0 - x)) / log(fma(Float64(0.5 / sqrt(x)), Float64(1.0 + x), sqrt(Float64(Float64(t_0 / x) - 1.0)))))
end
code[x_] := Block[{t$95$0 = N[Power[N[(0.5 * x + 0.5), $MachinePrecision], 2.0], $MachinePrecision]}, N[(N[Sqrt[N[(t$95$0 - x), $MachinePrecision]], $MachinePrecision] / N[Log[N[(N[(0.5 / N[Sqrt[x], $MachinePrecision]), $MachinePrecision] * N[(1.0 + x), $MachinePrecision] + N[Sqrt[N[(N[(t$95$0 / x), $MachinePrecision] - 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l} \\ \begin{array}{l} t_0 := {\left(\mathsf{fma}\left(0.5, x, 0.5\right)\right)}^{2}\\ \frac{\sqrt{t_0 - x}}{\log \left(\mathsf{fma}\left(\frac{0.5}{\sqrt{x}}, 1 + x, \sqrt{\frac{t_0}{x} - 1}\right)\right)} \end{array} \end{array}
Initial program: 97.2%
Applied rewrites (97.6%): lift-acosh.f64, acosh-def, lower-log.f64, lift-/.f64, lift-/.f64, div-inv, metadata-eval, associate-/l*, *-commutative, lower-fma.f64, lower-/.f64, lift-+.f64, +-commutative, lower-+.f64, lower-sqrt.f64
Applied rewrites (97.9%): lift-pow.f64, lift-sqrt.f64, sqrt-pow2, metadata-eval, unpow1 (97.9), lift-/.f64, div-inv, lift-+.f64, metadata-eval, distribute-lft1-in, *-commutative, lift-fma.f64 (97.9)
(FPCore (x) :precision binary64 (/ (* (sqrt (fma 0.5 (+ 1.0 x) (sqrt x))) (sqrt (- (fma 0.5 x 0.5) (sqrt x)))) (log (fma (/ 0.5 (sqrt x)) (+ 1.0 x) (sqrt (- (/ (pow (fma 0.5 x 0.5) 2.0) x) 1.0))))))
double code(double x) { return (sqrt(fma(0.5, (1.0 + x), sqrt(x))) * sqrt((fma(0.5, x, 0.5) - sqrt(x)))) / log(fma((0.5 / sqrt(x)), (1.0 + x), sqrt(((pow(fma(0.5, x, 0.5), 2.0) / x) - 1.0)))); }
function code(x) return Float64(Float64(sqrt(fma(0.5, Float64(1.0 + x), sqrt(x))) * sqrt(Float64(fma(0.5, x, 0.5) - sqrt(x)))) / log(fma(Float64(0.5 / sqrt(x)), Float64(1.0 + x), sqrt(Float64(Float64((fma(0.5, x, 0.5) ^ 2.0) / x) - 1.0))))) end
code[x_] := N[(N[(N[Sqrt[N[(0.5 * N[(1.0 + x), $MachinePrecision] + N[Sqrt[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[Sqrt[N[(N[(0.5 * x + 0.5), $MachinePrecision] - N[Sqrt[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[Log[N[(N[(0.5 / N[Sqrt[x], $MachinePrecision]), $MachinePrecision] * N[(1.0 + x), $MachinePrecision] + N[Sqrt[N[(N[(N[Power[N[(0.5 * x + 0.5), $MachinePrecision], 2.0], $MachinePrecision] / x), $MachinePrecision] - 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l} \\ \frac{\sqrt{\mathsf{fma}\left(0.5, 1 + x, \sqrt{x}\right)} \cdot \sqrt{\mathsf{fma}\left(0.5, x, 0.5\right) - \sqrt{x}}}{\log \left(\mathsf{fma}\left(\frac{0.5}{\sqrt{x}}, 1 + x, \sqrt{\frac{{\left(\mathsf{fma}\left(0.5, x, 0.5\right)\right)}^{2}}{x} - 1}\right)\right)} \end{array}
Initial program: 97.2%
Applied rewrites (97.6%): lift-acosh.f64, acosh-def, lower-log.f64, lift-/.f64, lift-/.f64, div-inv, metadata-eval, associate-/l*, *-commutative, lower-fma.f64, lower-/.f64, lift-+.f64, +-commutative, lower-+.f64, lower-sqrt.f64
Applied rewrites: 97.6%
(FPCore (x) :precision binary64 (/ (* (- 0.5 (/ 0.5 x)) x) (log (fma (/ 0.5 (sqrt x)) (+ 1.0 x) (sqrt (- (/ (pow (fma 0.5 x 0.5) 2.0) x) 1.0))))))
double code(double x) { return ((0.5 - (0.5 / x)) * x) / log(fma((0.5 / sqrt(x)), (1.0 + x), sqrt(((pow(fma(0.5, x, 0.5), 2.0) / x) - 1.0)))); }
function code(x) return Float64(Float64(Float64(0.5 - Float64(0.5 / x)) * x) / log(fma(Float64(0.5 / sqrt(x)), Float64(1.0 + x), sqrt(Float64(Float64((fma(0.5, x, 0.5) ^ 2.0) / x) - 1.0))))) end
code[x_] := N[(N[(N[(0.5 - N[(0.5 / x), $MachinePrecision]), $MachinePrecision] * x), $MachinePrecision] / N[Log[N[(N[(0.5 / N[Sqrt[x], $MachinePrecision]), $MachinePrecision] * N[(1.0 + x), $MachinePrecision] + N[Sqrt[N[(N[(N[Power[N[(0.5 * x + 0.5), $MachinePrecision], 2.0], $MachinePrecision] / x), $MachinePrecision] - 1.0), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l} \\ \frac{\left(0.5 - \frac{0.5}{x}\right) \cdot x}{\log \left(\mathsf{fma}\left(\frac{0.5}{\sqrt{x}}, 1 + x, \sqrt{\frac{{\left(\mathsf{fma}\left(0.5, x, 0.5\right)\right)}^{2}}{x} - 1}\right)\right)} \end{array}
Initial program: 97.2%
Applied rewrites (97.6%): lift-acosh.f64, acosh-def, lower-log.f64, lift-/.f64, lift-/.f64, div-inv, metadata-eval, associate-/l*, *-commutative, lower-fma.f64, lower-/.f64, lift-+.f64, +-commutative, lower-+.f64, lower-sqrt.f64
Taylor expanded in x around inf
Applied rewrites (97.5%): *-commutative, lower-*.f64, lower--.f64, associate-*r/, metadata-eval, lower-/.f64 (97.5)
(FPCore (x) :precision binary64 (/ (* (sqrt (fma -0.5 (- -1.0 x) (sqrt x))) (sqrt (- (fma 0.5 x 0.5) (sqrt x)))) (acosh (/ (/ (+ x 1.0) 2.0) (sqrt x)))))
double code(double x) { return (sqrt(fma(-0.5, (-1.0 - x), sqrt(x))) * sqrt((fma(0.5, x, 0.5) - sqrt(x)))) / acosh((((x + 1.0) / 2.0) / sqrt(x))); }
function code(x) return Float64(Float64(sqrt(fma(-0.5, Float64(-1.0 - x), sqrt(x))) * sqrt(Float64(fma(0.5, x, 0.5) - sqrt(x)))) / acosh(Float64(Float64(Float64(x + 1.0) / 2.0) / sqrt(x)))) end
code[x_] := N[(N[(N[Sqrt[N[(-0.5 * N[(-1.0 - x), $MachinePrecision] + N[Sqrt[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision] * N[Sqrt[N[(N[(0.5 * x + 0.5), $MachinePrecision] - N[Sqrt[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision] / N[ArcCosh[N[(N[(N[(x + 1.0), $MachinePrecision] / 2.0), $MachinePrecision] / N[Sqrt[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l} \\ \frac{\sqrt{\mathsf{fma}\left(-0.5, -1 - x, \sqrt{x}\right)} \cdot \sqrt{\mathsf{fma}\left(0.5, x, 0.5\right) - \sqrt{x}}}{\cosh^{-1} \left(\frac{\frac{x + 1}{2}}{\sqrt{x}}\right)} \end{array}
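The two square-root factors in this alternative come from the difference-of-squares step in its derivation: the quantity under the original square root factors as

{\left(\frac{x + 1}{2}\right)}^{2} - {\left(\sqrt{x}\right)}^{2} = \left(\frac{x + 1}{2} + \sqrt{x}\right) \cdot \left(\frac{x + 1}{2} - \sqrt{x}\right)

and \mathsf{fma}\left(-0.5, -1 - x, \sqrt{x}\right) computes the first factor, \frac{x + 1}{2} + \sqrt{x}, in a single fused multiply-add.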
Initial program: 97.2%
Applied rewrites (97.4%): lift-sqrt.f64, lift--.f64, lift-pow.f64, unpow2, lift-pow.f64, unpow2, difference-of-squares, sqrt-prod, lower-*.f64
(FPCore (x) :precision binary64 (/ (sqrt (* (fma -0.5 (- -1.0 x) (sqrt x)) (- (fma 0.5 x 0.5) (sqrt x)))) (acosh (/ (/ (+ x 1.0) 2.0) (sqrt x)))))
double code(double x) { return sqrt((fma(-0.5, (-1.0 - x), sqrt(x)) * (fma(0.5, x, 0.5) - sqrt(x)))) / acosh((((x + 1.0) / 2.0) / sqrt(x))); }
function code(x) return Float64(sqrt(Float64(fma(-0.5, Float64(-1.0 - x), sqrt(x)) * Float64(fma(0.5, x, 0.5) - sqrt(x)))) / acosh(Float64(Float64(Float64(x + 1.0) / 2.0) / sqrt(x)))) end
code[x_] := N[(N[Sqrt[N[(N[(-0.5 * N[(-1.0 - x), $MachinePrecision] + N[Sqrt[x], $MachinePrecision]), $MachinePrecision] * N[(N[(0.5 * x + 0.5), $MachinePrecision] - N[Sqrt[x], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / N[ArcCosh[N[(N[(N[(x + 1.0), $MachinePrecision] / 2.0), $MachinePrecision] / N[Sqrt[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l} \\ \frac{\sqrt{\mathsf{fma}\left(-0.5, -1 - x, \sqrt{x}\right) \cdot \left(\mathsf{fma}\left(0.5, x, 0.5\right) - \sqrt{x}\right)}}{\cosh^{-1} \left(\frac{\frac{x + 1}{2}}{\sqrt{x}}\right)} \end{array}
Initial program: 97.2%
Applied rewrites (97.3%): lift--.f64, lift-pow.f64, unpow2, lift-pow.f64, unpow2, difference-of-squares, lower-*.f64, lift-/.f64, clear-num, frac-2neg, associate-/r/, metadata-eval, metadata-eval, metadata-eval, lower-fma.f64, metadata-eval, lift-+.f64, +-commutative, distribute-neg-in, metadata-eval, unsub-neg, lower--.f64
(FPCore (x) :precision binary64 (/ (sqrt (fma (+ 1.0 x) (* (+ 1.0 x) 0.25) (- x))) (acosh (/ (/ (+ x 1.0) 2.0) (sqrt x)))))
double code(double x) { return sqrt(fma((1.0 + x), ((1.0 + x) * 0.25), -x)) / acosh((((x + 1.0) / 2.0) / sqrt(x))); }
function code(x) return Float64(sqrt(fma(Float64(1.0 + x), Float64(Float64(1.0 + x) * 0.25), Float64(-x))) / acosh(Float64(Float64(Float64(x + 1.0) / 2.0) / sqrt(x)))) end
code[x_] := N[(N[Sqrt[N[(N[(1.0 + x), $MachinePrecision] * N[(N[(1.0 + x), $MachinePrecision] * 0.25), $MachinePrecision] + (-x)), $MachinePrecision]], $MachinePrecision] / N[ArcCosh[N[(N[(N[(x + 1.0), $MachinePrecision] / 2.0), $MachinePrecision] / N[Sqrt[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l} \\ \frac{\sqrt{\mathsf{fma}\left(1 + x, \left(1 + x\right) \cdot 0.25, -x\right)}}{\cosh^{-1} \left(\frac{\frac{x + 1}{2}}{\sqrt{x}}\right)} \end{array}
Initial program: 97.2%
Applied rewrites (97.2%): lift--.f64, sub-neg, lift-pow.f64, lift-/.f64, div-inv, metadata-eval, unpow-prod-down, pow2, associate-*l*, lift-pow.f64, lift-sqrt.f64, sqrt-pow2, metadata-eval, unpow1, lower-fma.f64, lift-+.f64, +-commutative, lower-+.f64, lower-*.f64, lift-+.f64, +-commutative, lower-+.f64, metadata-eval, lower-neg.f64 (97.2)
(FPCore (x) :precision binary64 (/ (fma 0.5 x -0.5) (acosh (/ (/ (+ x 1.0) 2.0) (sqrt x)))))
double code(double x) { return fma(0.5, x, -0.5) / acosh((((x + 1.0) / 2.0) / sqrt(x))); }
function code(x) return Float64(fma(0.5, x, -0.5) / acosh(Float64(Float64(Float64(x + 1.0) / 2.0) / sqrt(x)))) end
code[x_] := N[(N[(0.5 * x + -0.5), $MachinePrecision] / N[ArcCosh[N[(N[(N[(x + 1.0), $MachinePrecision] / 2.0), $MachinePrecision] / N[Sqrt[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l} \\ \frac{\mathsf{fma}\left(0.5, x, -0.5\right)}{\cosh^{-1} \left(\frac{\frac{x + 1}{2}}{\sqrt{x}}\right)} \end{array}
Initial program: 97.2%
Taylor expanded in x around inf
Applied rewrites: 97.2%
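A minimal sketch (not from the report) comparing the initial program with this alternative at a few points in the preconditioned range 1 ≤ x ≤ 10; plain 0.5*x - 0.5 stands in for fma(0.5, x, -0.5), since math.fma is only available in very recent Python versions:

import math

def original(x):
    # initial program: sqrt(((x+1)/2)^2 - (sqrt x)^2) / acosh(((x+1)/2) / sqrt x)
    t_0 = (x + 1.0) / 2.0
    return math.sqrt(t_0 ** 2.0 - math.sqrt(x) ** 2.0) / math.acosh(t_0 / math.sqrt(x))

def alternative(x):
    # numerator (x - 1) / 2 written as an unfused multiply-add
    return (0.5 * x - 0.5) / math.acosh(((x + 1.0) / 2.0) / math.sqrt(x))

for x in (1.5, 4.0, 9.5):
    print(x, original(x), alternative(x))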
(FPCore (x) :precision binary64 (/ (* -0.5 x) (acosh (/ (fma x 0.5 0.5) (sqrt x)))))
double code(double x) { return (-0.5 * x) / acosh((fma(x, 0.5, 0.5) / sqrt(x))); }
function code(x) return Float64(Float64(-0.5 * x) / acosh(Float64(fma(x, 0.5, 0.5) / sqrt(x)))) end
code[x_] := N[(N[(-0.5 * x), $MachinePrecision] / N[ArcCosh[N[(N[(x * 0.5 + 0.5), $MachinePrecision] / N[Sqrt[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l} \\ \frac{-0.5 \cdot x}{\cosh^{-1} \left(\frac{\mathsf{fma}\left(x, 0.5, 0.5\right)}{\sqrt{x}}\right)} \end{array}
Initial program: 97.2%
Taylor expanded in x around -inf
Applied rewrites (1.6%): lower-*.f64 (1.6)
Applied rewrites (1.6%): lift-/.f64, div-inv, lift-+.f64, metadata-eval, distribute-lft1-in, lower-fma.f64 (1.6)
(FPCore (x) :precision binary64 (/ (* -0.5 x) (acosh (/ (* 0.5 x) (sqrt x)))))
double code(double x) { return (-0.5 * x) / acosh(((0.5 * x) / sqrt(x))); }
import math

def code(x): return (-0.5 * x) / math.acosh((0.5 * x) / math.sqrt(x))
function code(x) return Float64(Float64(-0.5 * x) / acosh(Float64(Float64(0.5 * x) / sqrt(x)))) end
function tmp = code(x)
    tmp = (-0.5 * x) / acosh((0.5 * x) / sqrt(x));
end
code[x_] := N[(N[(-0.5 * x), $MachinePrecision] / N[ArcCosh[N[(N[(0.5 * x), $MachinePrecision] / N[Sqrt[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l} \\ \frac{-0.5 \cdot x}{\cosh^{-1} \left(\frac{0.5 \cdot x}{\sqrt{x}}\right)} \end{array}
Initial program: 97.2%
Taylor expanded in x around -inf
Applied rewrites (1.6%): lower-*.f64 (1.6)
Taylor expanded in x around inf
Applied rewrites (0.5%): lower-*.f64 (0.5)
(FPCore (x) :precision binary64 (/ (* -0.5 x) (acosh (/ 0.5 (sqrt x)))))
double code(double x) { return (-0.5 * x) / acosh((0.5 / sqrt(x))); }
import math

def code(x): return (-0.5 * x) / math.acosh(0.5 / math.sqrt(x))
function code(x) return Float64(Float64(-0.5 * x) / acosh(Float64(0.5 / sqrt(x)))) end
function tmp = code(x)
    tmp = (-0.5 * x) / acosh(0.5 / sqrt(x));
end
code[x_] := N[(N[(-0.5 * x), $MachinePrecision] / N[ArcCosh[N[(0.5 / N[Sqrt[x], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l} \\ \frac{-0.5 \cdot x}{\cosh^{-1} \left(\frac{0.5}{\sqrt{x}}\right)} \end{array}
Initial program: 97.2%
Taylor expanded in x around -inf
Applied rewrites (1.6%): lower-*.f64 (1.6)
Taylor expanded in x around 0
Applied rewrites: 0.0%
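The last three alternatives come from Taylor expansions taken around -inf, inf, and 0, far from the sampled interval 1 ≤ x ≤ 10; for x ≥ 1 their -0.5·x numerator even has the wrong sign, so their derivations end at only 1.6%, 0.5%, and 0.0% accuracy.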
Reproduce by running the following command and entering the original FPCore:
herbie shell --seed 1
(FPCore (x)
:name " (sqrt(((x+1)/2)^2-(sqrt(x))^2)/acosh(((x+1)/2)/(sqrt(x)))) "
:precision binary64
:pre (and (<= 1.0 x) (<= x 10.0))
(/ (sqrt (- (pow (/ (+ x 1.0) 2.0) 2.0) (pow (sqrt x) 2.0))) (acosh (/ (/ (+ x 1.0) 2.0) (sqrt x)))))