pow(x+y,x+y-1/2)

Percentage Accurate: 96.6% → 97.8%
Time: 3.1s
Alternatives: 8
Speedup: 1.0×

Specification

?
\[\left(0.001 \leq x \land x \leq 100\right) \land \left(0.001 \leq y \land y \leq 100\right)\]
\[{\left(x + y\right)}^{\left(\left(x + y\right) - \frac{1}{2}\right)} \]
(FPCore (x y)
  :precision binary64
  (pow (+ x y) (- (+ x y) (/ 1.0 2.0))))
double code(double x, double y) {
	return pow((x + y), ((x + y) - (1.0 / 2.0)));
}
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = (x + y) ** ((x + y) - (1.0d0 / 2.0d0))
end function
// (x + y) ^ ((x + y) - 1/2) in double precision.
// The sum is hoisted into a local; 1.0 / 2.0 is exactly 0.5, so the
// result is bit-identical to the original one-liner.
public static double code(double x, double y) {
	final double sum = x + y;
	return Math.pow(sum, sum - 0.5);
}
def code(x, y):
	# (x + y) ** ((x + y) - 1/2); the sum is reused and 1.0 / 2.0 is
	# exactly 0.5, so this matches the original bit-for-bit.
	total = x + y
	return math.pow(total, total - 0.5)
function code(x, y)
	return Float64(x + y) ^ Float64(Float64(x + y) - Float64(1.0 / 2.0))
end
function tmp = code(x, y)
	tmp = (x + y) ^ ((x + y) - (1.0 / 2.0));
end
code[x_, y_] := N[Power[N[(x + y), $MachinePrecision], N[(N[(x + y), $MachinePrecision] - N[(1.0 / 2.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
{\left(x + y\right)}^{\left(\left(x + y\right) - \frac{1}{2}\right)}

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 8 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternatives. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative.The line shows the best available speed-accuracy tradeoffs.

Initial Program: 96.6% accurate, 1.0× speedup?

\[{\left(x + y\right)}^{\left(\left(x + y\right) - \frac{1}{2}\right)} \]
(FPCore (x y)
  :precision binary64
  (pow (+ x y) (- (+ x y) (/ 1.0 2.0))))
double code(double x, double y) {
	return pow((x + y), ((x + y) - (1.0 / 2.0)));
}
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = (x + y) ** ((x + y) - (1.0d0 / 2.0d0))
end function
public static double code(double x, double y) {
	return Math.pow((x + y), ((x + y) - (1.0 / 2.0)));
}
def code(x, y):
	return math.pow((x + y), ((x + y) - (1.0 / 2.0)))
function code(x, y)
	return Float64(x + y) ^ Float64(Float64(x + y) - Float64(1.0 / 2.0))
end
function tmp = code(x, y)
	tmp = (x + y) ^ ((x + y) - (1.0 / 2.0));
end
code[x_, y_] := N[Power[N[(x + y), $MachinePrecision], N[(N[(x + y), $MachinePrecision] - N[(1.0 / 2.0), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
{\left(x + y\right)}^{\left(\left(x + y\right) - \frac{1}{2}\right)}

Alternative 1: 97.8% accurate, 0.4× speedup?

\[{\left(\mathsf{max}\left(x, y\right) + \mathsf{min}\left(x, y\right)\right)}^{\left(\left(\mathsf{max}\left(x, y\right) - 0.5\right) \cdot 1\right)} \cdot {\left(\left(1 + \frac{\mathsf{min}\left(x, y\right)}{\mathsf{max}\left(x, y\right)}\right) \cdot \mathsf{max}\left(x, y\right)\right)}^{\left(\mathsf{min}\left(x, y\right)\right)} \]
(FPCore (x y)
  :precision binary64
  (*
 (pow (+ (fmax x y) (fmin x y)) (* (- (fmax x y) 0.5) 1.0))
 (pow (* (+ 1.0 (/ (fmin x y) (fmax x y))) (fmax x y)) (fmin x y))))
double code(double x, double y) {
	return pow((fmax(x, y) + fmin(x, y)), ((fmax(x, y) - 0.5) * 1.0)) * pow(((1.0 + (fmin(x, y) / fmax(x, y))) * fmax(x, y)), fmin(x, y));
}
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = ((merge(y, merge(x, max(x, y), y /= y), x /= x) + merge(y, merge(x, min(x, y), y /= y), x /= x)) ** ((merge(y, merge(x, max(x, y), y /= y), x /= x) - 0.5d0) * 1.0d0)) * (((1.0d0 + (merge(y, merge(x, min(x, y), y /= y), x /= x) / merge(y, merge(x, max(x, y), y /= y), x /= x))) * merge(y, merge(x, max(x, y), y /= y), x /= x)) ** merge(y, merge(x, min(x, y), y /= y), x /= x))
end function
// Herbie alternative 1: (max+min)^((max-0.5)*1) * ((1 + min/max)*max)^min.
// The generated listing called fmax/fmin, which do not exist in Java and
// would not compile; Math.max/Math.min are the double-precision
// equivalents. (C's fmax ignores a NaN operand while Math.max propagates
// it, but the spec restricts inputs to [0.001, 100], so no NaN arises.)
public static double code(double x, double y) {
	return Math.pow((Math.max(x, y) + Math.min(x, y)), ((Math.max(x, y) - 0.5) * 1.0)) * Math.pow(((1.0 + (Math.min(x, y) / Math.max(x, y))) * Math.max(x, y)), Math.min(x, y));
}
def code(x, y):
	# Herbie alternative 1: (max+min)^((max-0.5)*1) * ((1 + min/max)*max)^min.
	# The generated listing called fmax/fmin, which are undefined in Python
	# (NameError at runtime); built-in max/min are correct here since the
	# spec restricts inputs to [0.001, 100] and no NaN handling is needed.
	hi = max(x, y)
	lo = min(x, y)
	return math.pow(hi + lo, (hi - 0.5) * 1.0) * math.pow((1.0 + lo / hi) * hi, lo)
function code(x, y)
	return Float64((Float64(((x != x) ? y : ((y != y) ? x : max(x, y))) + ((x != x) ? y : ((y != y) ? x : min(x, y)))) ^ Float64(Float64(((x != x) ? y : ((y != y) ? x : max(x, y))) - 0.5) * 1.0)) * (Float64(Float64(1.0 + Float64(((x != x) ? y : ((y != y) ? x : min(x, y))) / ((x != x) ? y : ((y != y) ? x : max(x, y))))) * ((x != x) ? y : ((y != y) ? x : max(x, y)))) ^ ((x != x) ? y : ((y != y) ? x : min(x, y)))))
end
function tmp = code(x, y)
	tmp = ((max(x, y) + min(x, y)) ^ ((max(x, y) - 0.5) * 1.0)) * (((1.0 + (min(x, y) / max(x, y))) * max(x, y)) ^ min(x, y));
end
code[x_, y_] := N[(N[Power[N[(N[Max[x, y], $MachinePrecision] + N[Min[x, y], $MachinePrecision]), $MachinePrecision], N[(N[(N[Max[x, y], $MachinePrecision] - 0.5), $MachinePrecision] * 1.0), $MachinePrecision]], $MachinePrecision] * N[Power[N[(N[(1.0 + N[(N[Min[x, y], $MachinePrecision] / N[Max[x, y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * N[Max[x, y], $MachinePrecision]), $MachinePrecision], N[Min[x, y], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
{\left(\mathsf{max}\left(x, y\right) + \mathsf{min}\left(x, y\right)\right)}^{\left(\left(\mathsf{max}\left(x, y\right) - 0.5\right) \cdot 1\right)} \cdot {\left(\left(1 + \frac{\mathsf{min}\left(x, y\right)}{\mathsf{max}\left(x, y\right)}\right) \cdot \mathsf{max}\left(x, y\right)\right)}^{\left(\mathsf{min}\left(x, y\right)\right)}
Derivation
  1. Initial program 96.6%

    \[{\left(x + y\right)}^{\left(\left(x + y\right) - \frac{1}{2}\right)} \]
  2. Step-by-step derivation
    1. lift-pow.f64N/A

      \[\leadsto \color{blue}{{\left(x + y\right)}^{\left(\left(x + y\right) - \frac{1}{2}\right)}} \]
    2. lift--.f64N/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(\left(x + y\right) - \frac{1}{2}\right)}} \]
    3. lift-+.f64N/A

      \[\leadsto {\left(x + y\right)}^{\left(\color{blue}{\left(x + y\right)} - \frac{1}{2}\right)} \]
    4. associate--l+N/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(x + \left(y - \frac{1}{2}\right)\right)}} \]
    5. +-commutativeN/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(\left(y - \frac{1}{2}\right) + x\right)}} \]
    6. pow-addN/A

      \[\leadsto \color{blue}{{\left(x + y\right)}^{\left(y - \frac{1}{2}\right)} \cdot {\left(x + y\right)}^{x}} \]
    7. lower-unsound-*.f64N/A

      \[\leadsto \color{blue}{{\left(x + y\right)}^{\left(y - \frac{1}{2}\right)} \cdot {\left(x + y\right)}^{x}} \]
    8. lower-unsound-pow.f64N/A

      \[\leadsto \color{blue}{{\left(x + y\right)}^{\left(y - \frac{1}{2}\right)}} \cdot {\left(x + y\right)}^{x} \]
    9. lift-+.f64N/A

      \[\leadsto {\color{blue}{\left(x + y\right)}}^{\left(y - \frac{1}{2}\right)} \cdot {\left(x + y\right)}^{x} \]
    10. +-commutativeN/A

      \[\leadsto {\color{blue}{\left(y + x\right)}}^{\left(y - \frac{1}{2}\right)} \cdot {\left(x + y\right)}^{x} \]
    11. lower-+.f64N/A

      \[\leadsto {\color{blue}{\left(y + x\right)}}^{\left(y - \frac{1}{2}\right)} \cdot {\left(x + y\right)}^{x} \]
    12. lower--.f64N/A

      \[\leadsto {\left(y + x\right)}^{\color{blue}{\left(y - \frac{1}{2}\right)}} \cdot {\left(x + y\right)}^{x} \]
    13. lift-/.f64N/A

      \[\leadsto {\left(y + x\right)}^{\left(y - \color{blue}{\frac{1}{2}}\right)} \cdot {\left(x + y\right)}^{x} \]
    14. metadata-evalN/A

      \[\leadsto {\left(y + x\right)}^{\left(y - \color{blue}{\frac{1}{2}}\right)} \cdot {\left(x + y\right)}^{x} \]
    15. lower-unsound-pow.f6497.8%

      \[\leadsto {\left(y + x\right)}^{\left(y - 0.5\right)} \cdot \color{blue}{{\left(x + y\right)}^{x}} \]
    16. lift-+.f64N/A

      \[\leadsto {\left(y + x\right)}^{\left(y - \frac{1}{2}\right)} \cdot {\color{blue}{\left(x + y\right)}}^{x} \]
    17. +-commutativeN/A

      \[\leadsto {\left(y + x\right)}^{\left(y - \frac{1}{2}\right)} \cdot {\color{blue}{\left(y + x\right)}}^{x} \]
    18. lower-+.f6497.8%

      \[\leadsto {\left(y + x\right)}^{\left(y - 0.5\right)} \cdot {\color{blue}{\left(y + x\right)}}^{x} \]
  3. Applied rewrites97.8%

    \[\leadsto \color{blue}{{\left(y + x\right)}^{\left(y - 0.5\right)} \cdot {\left(y + x\right)}^{x}} \]
  4. Step-by-step derivation
    1. lift-pow.f64N/A

      \[\leadsto \color{blue}{{\left(y + x\right)}^{\left(y - \frac{1}{2}\right)}} \cdot {\left(y + x\right)}^{x} \]
    2. sqr-powN/A

      \[\leadsto \color{blue}{\left({\left(y + x\right)}^{\left(\frac{y - \frac{1}{2}}{2}\right)} \cdot {\left(y + x\right)}^{\left(\frac{y - \frac{1}{2}}{2}\right)}\right)} \cdot {\left(y + x\right)}^{x} \]
    3. lower-unsound-*.f64N/A

      \[\leadsto \color{blue}{\left({\left(y + x\right)}^{\left(\frac{y - \frac{1}{2}}{2}\right)} \cdot {\left(y + x\right)}^{\left(\frac{y - \frac{1}{2}}{2}\right)}\right)} \cdot {\left(y + x\right)}^{x} \]
    4. lift-+.f64N/A

      \[\leadsto \left({\color{blue}{\left(y + x\right)}}^{\left(\frac{y - \frac{1}{2}}{2}\right)} \cdot {\left(y + x\right)}^{\left(\frac{y - \frac{1}{2}}{2}\right)}\right) \cdot {\left(y + x\right)}^{x} \]
    5. +-commutativeN/A

      \[\leadsto \left({\color{blue}{\left(x + y\right)}}^{\left(\frac{y - \frac{1}{2}}{2}\right)} \cdot {\left(y + x\right)}^{\left(\frac{y - \frac{1}{2}}{2}\right)}\right) \cdot {\left(y + x\right)}^{x} \]
    6. lift-+.f64N/A

      \[\leadsto \left({\color{blue}{\left(x + y\right)}}^{\left(\frac{y - \frac{1}{2}}{2}\right)} \cdot {\left(y + x\right)}^{\left(\frac{y - \frac{1}{2}}{2}\right)}\right) \cdot {\left(y + x\right)}^{x} \]
    7. lower-unsound-pow.f64N/A

      \[\leadsto \left(\color{blue}{{\left(x + y\right)}^{\left(\frac{y - \frac{1}{2}}{2}\right)}} \cdot {\left(y + x\right)}^{\left(\frac{y - \frac{1}{2}}{2}\right)}\right) \cdot {\left(y + x\right)}^{x} \]
    8. lower-unsound-/.f64N/A

      \[\leadsto \left({\left(x + y\right)}^{\color{blue}{\left(\frac{y - \frac{1}{2}}{2}\right)}} \cdot {\left(y + x\right)}^{\left(\frac{y - \frac{1}{2}}{2}\right)}\right) \cdot {\left(y + x\right)}^{x} \]
    9. lift-+.f64N/A

      \[\leadsto \left({\left(x + y\right)}^{\left(\frac{y - \frac{1}{2}}{2}\right)} \cdot {\color{blue}{\left(y + x\right)}}^{\left(\frac{y - \frac{1}{2}}{2}\right)}\right) \cdot {\left(y + x\right)}^{x} \]
    10. +-commutativeN/A

      \[\leadsto \left({\left(x + y\right)}^{\left(\frac{y - \frac{1}{2}}{2}\right)} \cdot {\color{blue}{\left(x + y\right)}}^{\left(\frac{y - \frac{1}{2}}{2}\right)}\right) \cdot {\left(y + x\right)}^{x} \]
    11. lift-+.f64N/A

      \[\leadsto \left({\left(x + y\right)}^{\left(\frac{y - \frac{1}{2}}{2}\right)} \cdot {\color{blue}{\left(x + y\right)}}^{\left(\frac{y - \frac{1}{2}}{2}\right)}\right) \cdot {\left(y + x\right)}^{x} \]
    12. lower-unsound-pow.f64N/A

      \[\leadsto \left({\left(x + y\right)}^{\left(\frac{y - \frac{1}{2}}{2}\right)} \cdot \color{blue}{{\left(x + y\right)}^{\left(\frac{y - \frac{1}{2}}{2}\right)}}\right) \cdot {\left(y + x\right)}^{x} \]
    13. lower-unsound-/.f6497.6%

      \[\leadsto \left({\left(x + y\right)}^{\left(\frac{y - 0.5}{2}\right)} \cdot {\left(x + y\right)}^{\color{blue}{\left(\frac{y - 0.5}{2}\right)}}\right) \cdot {\left(y + x\right)}^{x} \]
  5. Applied rewrites97.6%

    \[\leadsto \color{blue}{\left({\left(x + y\right)}^{\left(\frac{y - 0.5}{2}\right)} \cdot {\left(x + y\right)}^{\left(\frac{y - 0.5}{2}\right)}\right)} \cdot {\left(y + x\right)}^{x} \]
  6. Step-by-step derivation
    1. lift-+.f64N/A

      \[\leadsto \left({\left(x + y\right)}^{\left(\frac{y - \frac{1}{2}}{2}\right)} \cdot {\left(x + y\right)}^{\left(\frac{y - \frac{1}{2}}{2}\right)}\right) \cdot {\color{blue}{\left(y + x\right)}}^{x} \]
    2. sum-to-multN/A

      \[\leadsto \left({\left(x + y\right)}^{\left(\frac{y - \frac{1}{2}}{2}\right)} \cdot {\left(x + y\right)}^{\left(\frac{y - \frac{1}{2}}{2}\right)}\right) \cdot {\color{blue}{\left(\left(1 + \frac{x}{y}\right) \cdot y\right)}}^{x} \]
    3. lower-unsound-*.f64N/A

      \[\leadsto \left({\left(x + y\right)}^{\left(\frac{y - \frac{1}{2}}{2}\right)} \cdot {\left(x + y\right)}^{\left(\frac{y - \frac{1}{2}}{2}\right)}\right) \cdot {\color{blue}{\left(\left(1 + \frac{x}{y}\right) \cdot y\right)}}^{x} \]
    4. lower-unsound-+.f64N/A

      \[\leadsto \left({\left(x + y\right)}^{\left(\frac{y - \frac{1}{2}}{2}\right)} \cdot {\left(x + y\right)}^{\left(\frac{y - \frac{1}{2}}{2}\right)}\right) \cdot {\left(\color{blue}{\left(1 + \frac{x}{y}\right)} \cdot y\right)}^{x} \]
    5. lower-unsound-/.f6497.5%

      \[\leadsto \left({\left(x + y\right)}^{\left(\frac{y - 0.5}{2}\right)} \cdot {\left(x + y\right)}^{\left(\frac{y - 0.5}{2}\right)}\right) \cdot {\left(\left(1 + \color{blue}{\frac{x}{y}}\right) \cdot y\right)}^{x} \]
  7. Applied rewrites97.5%

    \[\leadsto \left({\left(x + y\right)}^{\left(\frac{y - 0.5}{2}\right)} \cdot {\left(x + y\right)}^{\left(\frac{y - 0.5}{2}\right)}\right) \cdot {\color{blue}{\left(\left(1 + \frac{x}{y}\right) \cdot y\right)}}^{x} \]
  8. Step-by-step derivation
    1. lift-*.f64N/A

      \[\leadsto \color{blue}{\left({\left(x + y\right)}^{\left(\frac{y - \frac{1}{2}}{2}\right)} \cdot {\left(x + y\right)}^{\left(\frac{y - \frac{1}{2}}{2}\right)}\right)} \cdot {\left(\left(1 + \frac{x}{y}\right) \cdot y\right)}^{x} \]
    2. lift-pow.f64N/A

      \[\leadsto \left(\color{blue}{{\left(x + y\right)}^{\left(\frac{y - \frac{1}{2}}{2}\right)}} \cdot {\left(x + y\right)}^{\left(\frac{y - \frac{1}{2}}{2}\right)}\right) \cdot {\left(\left(1 + \frac{x}{y}\right) \cdot y\right)}^{x} \]
    3. lift-pow.f64N/A

      \[\leadsto \left({\left(x + y\right)}^{\left(\frac{y - \frac{1}{2}}{2}\right)} \cdot \color{blue}{{\left(x + y\right)}^{\left(\frac{y - \frac{1}{2}}{2}\right)}}\right) \cdot {\left(\left(1 + \frac{x}{y}\right) \cdot y\right)}^{x} \]
    4. pow-sqrN/A

      \[\leadsto \color{blue}{{\left(x + y\right)}^{\left(2 \cdot \frac{y - \frac{1}{2}}{2}\right)}} \cdot {\left(\left(1 + \frac{x}{y}\right) \cdot y\right)}^{x} \]
    5. lower-pow.f64N/A

      \[\leadsto \color{blue}{{\left(x + y\right)}^{\left(2 \cdot \frac{y - \frac{1}{2}}{2}\right)}} \cdot {\left(\left(1 + \frac{x}{y}\right) \cdot y\right)}^{x} \]
    6. lift-+.f64N/A

      \[\leadsto {\color{blue}{\left(x + y\right)}}^{\left(2 \cdot \frac{y - \frac{1}{2}}{2}\right)} \cdot {\left(\left(1 + \frac{x}{y}\right) \cdot y\right)}^{x} \]
    7. +-commutativeN/A

      \[\leadsto {\color{blue}{\left(y + x\right)}}^{\left(2 \cdot \frac{y - \frac{1}{2}}{2}\right)} \cdot {\left(\left(1 + \frac{x}{y}\right) \cdot y\right)}^{x} \]
    8. lift-+.f64N/A

      \[\leadsto {\color{blue}{\left(y + x\right)}}^{\left(2 \cdot \frac{y - \frac{1}{2}}{2}\right)} \cdot {\left(\left(1 + \frac{x}{y}\right) \cdot y\right)}^{x} \]
    9. count-2-revN/A

      \[\leadsto {\left(y + x\right)}^{\color{blue}{\left(\frac{y - \frac{1}{2}}{2} + \frac{y - \frac{1}{2}}{2}\right)}} \cdot {\left(\left(1 + \frac{x}{y}\right) \cdot y\right)}^{x} \]
    10. lift-/.f64N/A

      \[\leadsto {\left(y + x\right)}^{\left(\color{blue}{\frac{y - \frac{1}{2}}{2}} + \frac{y - \frac{1}{2}}{2}\right)} \cdot {\left(\left(1 + \frac{x}{y}\right) \cdot y\right)}^{x} \]
    11. mult-flipN/A

      \[\leadsto {\left(y + x\right)}^{\left(\color{blue}{\left(y - \frac{1}{2}\right) \cdot \frac{1}{2}} + \frac{y - \frac{1}{2}}{2}\right)} \cdot {\left(\left(1 + \frac{x}{y}\right) \cdot y\right)}^{x} \]
    12. metadata-evalN/A

      \[\leadsto {\left(y + x\right)}^{\left(\left(y - \frac{1}{2}\right) \cdot \color{blue}{\frac{1}{2}} + \frac{y - \frac{1}{2}}{2}\right)} \cdot {\left(\left(1 + \frac{x}{y}\right) \cdot y\right)}^{x} \]
    13. lift-/.f64N/A

      \[\leadsto {\left(y + x\right)}^{\left(\left(y - \frac{1}{2}\right) \cdot \frac{1}{2} + \color{blue}{\frac{y - \frac{1}{2}}{2}}\right)} \cdot {\left(\left(1 + \frac{x}{y}\right) \cdot y\right)}^{x} \]
    14. mult-flipN/A

      \[\leadsto {\left(y + x\right)}^{\left(\left(y - \frac{1}{2}\right) \cdot \frac{1}{2} + \color{blue}{\left(y - \frac{1}{2}\right) \cdot \frac{1}{2}}\right)} \cdot {\left(\left(1 + \frac{x}{y}\right) \cdot y\right)}^{x} \]
    15. metadata-evalN/A

      \[\leadsto {\left(y + x\right)}^{\left(\left(y - \frac{1}{2}\right) \cdot \frac{1}{2} + \left(y - \frac{1}{2}\right) \cdot \color{blue}{\frac{1}{2}}\right)} \cdot {\left(\left(1 + \frac{x}{y}\right) \cdot y\right)}^{x} \]
    16. distribute-lft-outN/A

      \[\leadsto {\left(y + x\right)}^{\color{blue}{\left(\left(y - \frac{1}{2}\right) \cdot \left(\frac{1}{2} + \frac{1}{2}\right)\right)}} \cdot {\left(\left(1 + \frac{x}{y}\right) \cdot y\right)}^{x} \]
    17. metadata-evalN/A

      \[\leadsto {\left(y + x\right)}^{\left(\left(y - \frac{1}{2}\right) \cdot \color{blue}{1}\right)} \cdot {\left(\left(1 + \frac{x}{y}\right) \cdot y\right)}^{x} \]
    18. lower-*.f6497.6%

      \[\leadsto {\left(y + x\right)}^{\color{blue}{\left(\left(y - 0.5\right) \cdot 1\right)}} \cdot {\left(\left(1 + \frac{x}{y}\right) \cdot y\right)}^{x} \]
  9. Applied rewrites97.6%

    \[\leadsto \color{blue}{{\left(y + x\right)}^{\left(\left(y - 0.5\right) \cdot 1\right)}} \cdot {\left(\left(1 + \frac{x}{y}\right) \cdot y\right)}^{x} \]
  10. Add Preprocessing

Alternative 2: 97.8% accurate, 0.4× speedup?

\[\begin{array}{l} t_0 := \mathsf{min}\left(x, y\right) + \mathsf{max}\left(x, y\right)\\ \frac{{t\_0}^{\left(-0.25 + \mathsf{max}\left(x, y\right)\right)}}{{t\_0}^{\left(0.25 - \mathsf{min}\left(x, y\right)\right)}} \end{array} \]
(FPCore (x y)
  :precision binary64
  (let* ((t_0 (+ (fmin x y) (fmax x y))))
  (/ (pow t_0 (+ -0.25 (fmax x y))) (pow t_0 (- 0.25 (fmin x y))))))
double code(double x, double y) {
	double t_0 = fmin(x, y) + fmax(x, y);
	return pow(t_0, (-0.25 + fmax(x, y))) / pow(t_0, (0.25 - fmin(x, y)));
}
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8) :: t_0
    t_0 = merge(y, merge(x, min(x, y), y /= y), x /= x) + merge(y, merge(x, max(x, y), y /= y), x /= x)
    code = (t_0 ** ((-0.25d0) + merge(y, merge(x, max(x, y), y /= y), x /= x))) / (t_0 ** (0.25d0 - merge(y, merge(x, min(x, y), y /= y), x /= x)))
end function
// Herbie alternative 2: t0^(-0.25 + max) / t0^(0.25 - min), t0 = min + max.
// The generated listing called fmax/fmin, which do not exist in Java and
// would not compile; Math.max/Math.min are the correct replacements for
// the spec's NaN-free domain (0.001 <= x, y <= 100).
public static double code(double x, double y) {
	double t_0 = Math.min(x, y) + Math.max(x, y);
	return Math.pow(t_0, (-0.25 + Math.max(x, y))) / Math.pow(t_0, (0.25 - Math.min(x, y)));
}
def code(x, y):
	# Herbie alternative 2: t0^(-0.25 + max) / t0^(0.25 - min).
	# The generated listing called fmax/fmin, which are undefined in Python
	# (NameError at runtime); built-in max/min suffice on the spec's
	# NaN-free domain (0.001 <= x, y <= 100).
	t_0 = min(x, y) + max(x, y)
	return math.pow(t_0, -0.25 + max(x, y)) / math.pow(t_0, 0.25 - min(x, y))
function code(x, y)
	t_0 = Float64(((x != x) ? y : ((y != y) ? x : min(x, y))) + ((x != x) ? y : ((y != y) ? x : max(x, y))))
	return Float64((t_0 ^ Float64(-0.25 + ((x != x) ? y : ((y != y) ? x : max(x, y))))) / (t_0 ^ Float64(0.25 - ((x != x) ? y : ((y != y) ? x : min(x, y))))))
end
function tmp = code(x, y)
	t_0 = min(x, y) + max(x, y);
	tmp = (t_0 ^ (-0.25 + max(x, y))) / (t_0 ^ (0.25 - min(x, y)));
end
code[x_, y_] := Block[{t$95$0 = N[(N[Min[x, y], $MachinePrecision] + N[Max[x, y], $MachinePrecision]), $MachinePrecision]}, N[(N[Power[t$95$0, N[(-0.25 + N[Max[x, y], $MachinePrecision]), $MachinePrecision]], $MachinePrecision] / N[Power[t$95$0, N[(0.25 - N[Min[x, y], $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
t_0 := \mathsf{min}\left(x, y\right) + \mathsf{max}\left(x, y\right)\\
\frac{{t\_0}^{\left(-0.25 + \mathsf{max}\left(x, y\right)\right)}}{{t\_0}^{\left(0.25 - \mathsf{min}\left(x, y\right)\right)}}
\end{array}
Derivation
  1. Initial program 96.6%

    \[{\left(x + y\right)}^{\left(\left(x + y\right) - \frac{1}{2}\right)} \]
  2. Step-by-step derivation
    1. lift-pow.f64N/A

      \[\leadsto \color{blue}{{\left(x + y\right)}^{\left(\left(x + y\right) - \frac{1}{2}\right)}} \]
    2. lift--.f64N/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(\left(x + y\right) - \frac{1}{2}\right)}} \]
    3. lift-+.f64N/A

      \[\leadsto {\left(x + y\right)}^{\left(\color{blue}{\left(x + y\right)} - \frac{1}{2}\right)} \]
    4. add-flipN/A

      \[\leadsto {\left(x + y\right)}^{\left(\color{blue}{\left(x - \left(\mathsf{neg}\left(y\right)\right)\right)} - \frac{1}{2}\right)} \]
    5. associate--l-N/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(x - \left(\left(\mathsf{neg}\left(y\right)\right) + \frac{1}{2}\right)\right)}} \]
    6. pow-subN/A

      \[\leadsto \color{blue}{\frac{{\left(x + y\right)}^{x}}{{\left(x + y\right)}^{\left(\left(\mathsf{neg}\left(y\right)\right) + \frac{1}{2}\right)}}} \]
    7. lower-unsound-/.f64N/A

      \[\leadsto \color{blue}{\frac{{\left(x + y\right)}^{x}}{{\left(x + y\right)}^{\left(\left(\mathsf{neg}\left(y\right)\right) + \frac{1}{2}\right)}}} \]
    8. lower-unsound-pow.f64N/A

      \[\leadsto \frac{\color{blue}{{\left(x + y\right)}^{x}}}{{\left(x + y\right)}^{\left(\left(\mathsf{neg}\left(y\right)\right) + \frac{1}{2}\right)}} \]
    9. lift-+.f64N/A

      \[\leadsto \frac{{\color{blue}{\left(x + y\right)}}^{x}}{{\left(x + y\right)}^{\left(\left(\mathsf{neg}\left(y\right)\right) + \frac{1}{2}\right)}} \]
    10. +-commutativeN/A

      \[\leadsto \frac{{\color{blue}{\left(y + x\right)}}^{x}}{{\left(x + y\right)}^{\left(\left(\mathsf{neg}\left(y\right)\right) + \frac{1}{2}\right)}} \]
    11. lower-+.f64N/A

      \[\leadsto \frac{{\color{blue}{\left(y + x\right)}}^{x}}{{\left(x + y\right)}^{\left(\left(\mathsf{neg}\left(y\right)\right) + \frac{1}{2}\right)}} \]
    12. +-commutativeN/A

      \[\leadsto \frac{{\left(y + x\right)}^{x}}{{\left(x + y\right)}^{\color{blue}{\left(\frac{1}{2} + \left(\mathsf{neg}\left(y\right)\right)\right)}}} \]
    13. sub-flipN/A

      \[\leadsto \frac{{\left(y + x\right)}^{x}}{{\left(x + y\right)}^{\color{blue}{\left(\frac{1}{2} - y\right)}}} \]
    14. lower-unsound-pow.f64N/A

      \[\leadsto \frac{{\left(y + x\right)}^{x}}{\color{blue}{{\left(x + y\right)}^{\left(\frac{1}{2} - y\right)}}} \]
    15. lift-+.f64N/A

      \[\leadsto \frac{{\left(y + x\right)}^{x}}{{\color{blue}{\left(x + y\right)}}^{\left(\frac{1}{2} - y\right)}} \]
    16. +-commutativeN/A

      \[\leadsto \frac{{\left(y + x\right)}^{x}}{{\color{blue}{\left(y + x\right)}}^{\left(\frac{1}{2} - y\right)}} \]
    17. lower-+.f64N/A

      \[\leadsto \frac{{\left(y + x\right)}^{x}}{{\color{blue}{\left(y + x\right)}}^{\left(\frac{1}{2} - y\right)}} \]
    18. lower--.f6497.8%

      \[\leadsto \frac{{\left(y + x\right)}^{x}}{{\left(y + x\right)}^{\color{blue}{\left(\frac{1}{2} - y\right)}}} \]
    19. lift-/.f64N/A

      \[\leadsto \frac{{\left(y + x\right)}^{x}}{{\left(y + x\right)}^{\left(\color{blue}{\frac{1}{2}} - y\right)}} \]
    20. metadata-eval97.8%

      \[\leadsto \frac{{\left(y + x\right)}^{x}}{{\left(y + x\right)}^{\left(\color{blue}{0.5} - y\right)}} \]
  3. Applied rewrites97.8%

    \[\leadsto \color{blue}{\frac{{\left(y + x\right)}^{x}}{{\left(y + x\right)}^{\left(0.5 - y\right)}}} \]
  4. Applied rewrites97.8%

    \[\leadsto \color{blue}{\frac{{\left(x + y\right)}^{\left(-0.25 + y\right)}}{{\left(x + y\right)}^{\left(0.25 - x\right)}}} \]
  5. Add Preprocessing

Alternative 3: 97.8% accurate, 0.6× speedup?

\[{\left(y + x\right)}^{\left(y - 0.5\right)} \cdot {\left(y + x\right)}^{x} \]
(FPCore (x y)
  :precision binary64
  (* (pow (+ y x) (- y 0.5)) (pow (+ y x) x)))
double code(double x, double y) {
	return pow((y + x), (y - 0.5)) * pow((y + x), x);
}
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = ((y + x) ** (y - 0.5d0)) * ((y + x) ** x)
end function
public static double code(double x, double y) {
	return Math.pow((y + x), (y - 0.5)) * Math.pow((y + x), x);
}
def code(x, y):
	# Herbie alternative 3: (y + x)^(y - 0.5) * (y + x)^x, split so the
	# shared base is computed once. Bit-identical to the original.
	base = y + x
	return math.pow(base, y - 0.5) * math.pow(base, x)
function code(x, y)
	return Float64((Float64(y + x) ^ Float64(y - 0.5)) * (Float64(y + x) ^ x))
end
function tmp = code(x, y)
	tmp = ((y + x) ^ (y - 0.5)) * ((y + x) ^ x);
end
code[x_, y_] := N[(N[Power[N[(y + x), $MachinePrecision], N[(y - 0.5), $MachinePrecision]], $MachinePrecision] * N[Power[N[(y + x), $MachinePrecision], x], $MachinePrecision]), $MachinePrecision]
{\left(y + x\right)}^{\left(y - 0.5\right)} \cdot {\left(y + x\right)}^{x}
Derivation
  1. Initial program 96.6%

    \[{\left(x + y\right)}^{\left(\left(x + y\right) - \frac{1}{2}\right)} \]
  2. Step-by-step derivation
    1. lift-pow.f64N/A

      \[\leadsto \color{blue}{{\left(x + y\right)}^{\left(\left(x + y\right) - \frac{1}{2}\right)}} \]
    2. lift--.f64N/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(\left(x + y\right) - \frac{1}{2}\right)}} \]
    3. lift-+.f64N/A

      \[\leadsto {\left(x + y\right)}^{\left(\color{blue}{\left(x + y\right)} - \frac{1}{2}\right)} \]
    4. associate--l+N/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(x + \left(y - \frac{1}{2}\right)\right)}} \]
    5. +-commutativeN/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(\left(y - \frac{1}{2}\right) + x\right)}} \]
    6. pow-addN/A

      \[\leadsto \color{blue}{{\left(x + y\right)}^{\left(y - \frac{1}{2}\right)} \cdot {\left(x + y\right)}^{x}} \]
    7. lower-unsound-*.f64N/A

      \[\leadsto \color{blue}{{\left(x + y\right)}^{\left(y - \frac{1}{2}\right)} \cdot {\left(x + y\right)}^{x}} \]
    8. lower-unsound-pow.f64N/A

      \[\leadsto \color{blue}{{\left(x + y\right)}^{\left(y - \frac{1}{2}\right)}} \cdot {\left(x + y\right)}^{x} \]
    9. lift-+.f64N/A

      \[\leadsto {\color{blue}{\left(x + y\right)}}^{\left(y - \frac{1}{2}\right)} \cdot {\left(x + y\right)}^{x} \]
    10. +-commutativeN/A

      \[\leadsto {\color{blue}{\left(y + x\right)}}^{\left(y - \frac{1}{2}\right)} \cdot {\left(x + y\right)}^{x} \]
    11. lower-+.f64N/A

      \[\leadsto {\color{blue}{\left(y + x\right)}}^{\left(y - \frac{1}{2}\right)} \cdot {\left(x + y\right)}^{x} \]
    12. lower--.f64N/A

      \[\leadsto {\left(y + x\right)}^{\color{blue}{\left(y - \frac{1}{2}\right)}} \cdot {\left(x + y\right)}^{x} \]
    13. lift-/.f64N/A

      \[\leadsto {\left(y + x\right)}^{\left(y - \color{blue}{\frac{1}{2}}\right)} \cdot {\left(x + y\right)}^{x} \]
    14. metadata-evalN/A

      \[\leadsto {\left(y + x\right)}^{\left(y - \color{blue}{\frac{1}{2}}\right)} \cdot {\left(x + y\right)}^{x} \]
    15. lower-unsound-pow.f6497.8%

      \[\leadsto {\left(y + x\right)}^{\left(y - 0.5\right)} \cdot \color{blue}{{\left(x + y\right)}^{x}} \]
    16. lift-+.f64N/A

      \[\leadsto {\left(y + x\right)}^{\left(y - \frac{1}{2}\right)} \cdot {\color{blue}{\left(x + y\right)}}^{x} \]
    17. +-commutativeN/A

      \[\leadsto {\left(y + x\right)}^{\left(y - \frac{1}{2}\right)} \cdot {\color{blue}{\left(y + x\right)}}^{x} \]
    18. lower-+.f6497.8%

      \[\leadsto {\left(y + x\right)}^{\left(y - 0.5\right)} \cdot {\color{blue}{\left(y + x\right)}}^{x} \]
  3. Applied rewrites97.8%

    \[\leadsto \color{blue}{{\left(y + x\right)}^{\left(y - 0.5\right)} \cdot {\left(y + x\right)}^{x}} \]
  4. Add Preprocessing

Alternative 4: 97.7% accurate, 0.5× speedup?

\[\begin{array}{l} t_0 := \mathsf{max}\left(x, y\right) + \mathsf{min}\left(x, y\right)\\ {t\_0}^{\left(\mathsf{min}\left(x, y\right) - 0.5\right)} \cdot {t\_0}^{\left(\mathsf{max}\left(x, y\right)\right)} \end{array} \]
(FPCore (x y)
  :precision binary64
  (let* ((t_0 (+ (fmax x y) (fmin x y))))
  (* (pow t_0 (- (fmin x y) 0.5)) (pow t_0 (fmax x y)))))
double code(double x, double y) {
	double t_0 = fmax(x, y) + fmin(x, y);
	return pow(t_0, (fmin(x, y) - 0.5)) * pow(t_0, fmax(x, y));
}
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8) :: t_0
    t_0 = merge(y, merge(x, max(x, y), y /= y), x /= x) + merge(y, merge(x, min(x, y), y /= y), x /= x)
    code = (t_0 ** (merge(y, merge(x, min(x, y), y /= y), x /= x) - 0.5d0)) * (t_0 ** merge(y, merge(x, max(x, y), y /= y), x /= x))
end function
// Herbie alternative 4: t0^(min - 0.5) * t0^max, t0 = max + min.
// The generated listing called fmax/fmin, which do not exist in Java and
// would not compile; Math.max/Math.min are the correct replacements for
// the spec's NaN-free domain (0.001 <= x, y <= 100).
public static double code(double x, double y) {
	double t_0 = Math.max(x, y) + Math.min(x, y);
	return Math.pow(t_0, (Math.min(x, y) - 0.5)) * Math.pow(t_0, Math.max(x, y));
}
def code(x, y):
	# Herbie alternative 4: t0^(min - 0.5) * t0^max, t0 = max + min.
	# The generated listing called fmax/fmin, which are undefined in Python
	# (NameError at runtime); built-in max/min suffice on the spec's
	# NaN-free domain (0.001 <= x, y <= 100).
	t_0 = max(x, y) + min(x, y)
	return math.pow(t_0, min(x, y) - 0.5) * math.pow(t_0, max(x, y))
function code(x, y)
	t_0 = Float64(((x != x) ? y : ((y != y) ? x : max(x, y))) + ((x != x) ? y : ((y != y) ? x : min(x, y))))
	return Float64((t_0 ^ Float64(((x != x) ? y : ((y != y) ? x : min(x, y))) - 0.5)) * (t_0 ^ ((x != x) ? y : ((y != y) ? x : max(x, y)))))
end
function tmp = code(x, y)
	t_0 = max(x, y) + min(x, y);
	tmp = (t_0 ^ (min(x, y) - 0.5)) * (t_0 ^ max(x, y));
end
code[x_, y_] := Block[{t$95$0 = N[(N[Max[x, y], $MachinePrecision] + N[Min[x, y], $MachinePrecision]), $MachinePrecision]}, N[(N[Power[t$95$0, N[(N[Min[x, y], $MachinePrecision] - 0.5), $MachinePrecision]], $MachinePrecision] * N[Power[t$95$0, N[Max[x, y], $MachinePrecision]], $MachinePrecision]), $MachinePrecision]]
\begin{array}{l}
t_0 := \mathsf{max}\left(x, y\right) + \mathsf{min}\left(x, y\right)\\
{t\_0}^{\left(\mathsf{min}\left(x, y\right) - 0.5\right)} \cdot {t\_0}^{\left(\mathsf{max}\left(x, y\right)\right)}
\end{array}
Derivation
  1. Initial program 96.6%

    \[{\left(x + y\right)}^{\left(\left(x + y\right) - \frac{1}{2}\right)} \]
  2. Step-by-step derivation
    1. lift-pow.f64N/A

      \[\leadsto \color{blue}{{\left(x + y\right)}^{\left(\left(x + y\right) - \frac{1}{2}\right)}} \]
    2. lift--.f64N/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(\left(x + y\right) - \frac{1}{2}\right)}} \]
    3. lift-+.f64N/A

      \[\leadsto {\left(x + y\right)}^{\left(\color{blue}{\left(x + y\right)} - \frac{1}{2}\right)} \]
    4. +-commutativeN/A

      \[\leadsto {\left(x + y\right)}^{\left(\color{blue}{\left(y + x\right)} - \frac{1}{2}\right)} \]
    5. associate--l+N/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(y + \left(x - \frac{1}{2}\right)\right)}} \]
    6. +-commutativeN/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(\left(x - \frac{1}{2}\right) + y\right)}} \]
    7. pow-addN/A

      \[\leadsto \color{blue}{{\left(x + y\right)}^{\left(x - \frac{1}{2}\right)} \cdot {\left(x + y\right)}^{y}} \]
    8. lower-unsound-*.f64N/A

      \[\leadsto \color{blue}{{\left(x + y\right)}^{\left(x - \frac{1}{2}\right)} \cdot {\left(x + y\right)}^{y}} \]
    9. lower-unsound-pow.f64N/A

      \[\leadsto \color{blue}{{\left(x + y\right)}^{\left(x - \frac{1}{2}\right)}} \cdot {\left(x + y\right)}^{y} \]
    10. lift-+.f64N/A

      \[\leadsto {\color{blue}{\left(x + y\right)}}^{\left(x - \frac{1}{2}\right)} \cdot {\left(x + y\right)}^{y} \]
    11. +-commutativeN/A

      \[\leadsto {\color{blue}{\left(y + x\right)}}^{\left(x - \frac{1}{2}\right)} \cdot {\left(x + y\right)}^{y} \]
    12. lower-+.f64N/A

      \[\leadsto {\color{blue}{\left(y + x\right)}}^{\left(x - \frac{1}{2}\right)} \cdot {\left(x + y\right)}^{y} \]
    13. lower--.f64N/A

      \[\leadsto {\left(y + x\right)}^{\color{blue}{\left(x - \frac{1}{2}\right)}} \cdot {\left(x + y\right)}^{y} \]
    14. lift-/.f64N/A

      \[\leadsto {\left(y + x\right)}^{\left(x - \color{blue}{\frac{1}{2}}\right)} \cdot {\left(x + y\right)}^{y} \]
    15. metadata-evalN/A

      \[\leadsto {\left(y + x\right)}^{\left(x - \color{blue}{\frac{1}{2}}\right)} \cdot {\left(x + y\right)}^{y} \]
    16. lower-unsound-pow.f6497.7%

      \[\leadsto {\left(y + x\right)}^{\left(x - 0.5\right)} \cdot \color{blue}{{\left(x + y\right)}^{y}} \]
    17. lift-+.f64N/A

      \[\leadsto {\left(y + x\right)}^{\left(x - \frac{1}{2}\right)} \cdot {\color{blue}{\left(x + y\right)}}^{y} \]
    18. +-commutativeN/A

      \[\leadsto {\left(y + x\right)}^{\left(x - \frac{1}{2}\right)} \cdot {\color{blue}{\left(y + x\right)}}^{y} \]
    19. lower-+.f6497.7%

      \[\leadsto {\left(y + x\right)}^{\left(x - 0.5\right)} \cdot {\color{blue}{\left(y + x\right)}}^{y} \]
  3. Applied rewrites97.7%

    \[\leadsto \color{blue}{{\left(y + x\right)}^{\left(x - 0.5\right)} \cdot {\left(y + x\right)}^{y}} \]
  4. Add Preprocessing

Alternative 5: 96.6% accurate, 1.0× speedup?

\[{\left(x + y\right)}^{\left(\left(-0.25 + y\right) - \left(0.25 - x\right)\right)} \]
(FPCore (x y)
  :precision binary64
  (pow (+ x y) (- (+ -0.25 y) (- 0.25 x))))
double code(double x, double y) {
	return pow((x + y), ((-0.25 + y) - (0.25 - x)));
}
! Computes (x + y) ** ((-0.25 + y) - (0.25 - x)), a regrouping of
! (x + y) - 1/2 in the exponent.
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    real(8) :: expnt
    expnt = ((-0.25d0) + y) - (0.25d0 - x)
    code = (x + y) ** expnt
end function
// (x + y) raised to ((-0.25 + y) - (0.25 - x)), i.e. (x + y) - 0.5 regrouped.
public static double code(double x, double y) {
	final double base = x + y;
	final double exponent = (-0.25 + y) - (0.25 - x);
	return Math.pow(base, exponent);
}
def code(x, y):
	# (x + y) ** ((x + y) - 0.5), with the exponent regrouped as
	# (-0.25 + y) - (0.25 - x) to reduce rounding error.
	base = x + y
	exponent = (-0.25 + y) - (0.25 - x)
	return math.pow(base, exponent)
# (x + y)^((x + y) - 0.5), exponent regrouped as (-0.25 + y) - (0.25 - x).
function code(x, y)
	base = Float64(x + y)
	expo = Float64(Float64(-0.25 + y) - Float64(0.25 - x))
	return base ^ expo
end
% (x + y) ^ ((x + y) - 0.5), with the exponent regrouped for accuracy.
function tmp = code(x, y)
	expnt = (-0.25 + y) - (0.25 - x);
	tmp = (x + y) ^ expnt;
end
(* Machine-precision evaluation of (x + y)^((-0.25 + y) - (0.25 - x)). *)
code[x_, y_] := N[Power[N[(x + y), $MachinePrecision], N[(N[(-0.25 + y), $MachinePrecision] - N[(0.25 - x), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
{\left(x + y\right)}^{\left(\left(-0.25 + y\right) - \left(0.25 - x\right)\right)}
Derivation
  1. Initial program 96.6%

    \[{\left(x + y\right)}^{\left(\left(x + y\right) - \frac{1}{2}\right)} \]
  2. Step-by-step derivation
    1. lift--.f64N/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(\left(x + y\right) - \frac{1}{2}\right)}} \]
    2. sub-negate-revN/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(\mathsf{neg}\left(\left(\frac{1}{2} - \left(x + y\right)\right)\right)\right)}} \]
    3. lift-+.f64N/A

      \[\leadsto {\left(x + y\right)}^{\left(\mathsf{neg}\left(\left(\frac{1}{2} - \color{blue}{\left(x + y\right)}\right)\right)\right)} \]
    4. associate--r+N/A

      \[\leadsto {\left(x + y\right)}^{\left(\mathsf{neg}\left(\color{blue}{\left(\left(\frac{1}{2} - x\right) - y\right)}\right)\right)} \]
    5. sub-negateN/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(y - \left(\frac{1}{2} - x\right)\right)}} \]
    6. lower--.f64N/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(y - \left(\frac{1}{2} - x\right)\right)}} \]
    7. lower--.f6496.6%

      \[\leadsto {\left(x + y\right)}^{\left(y - \color{blue}{\left(\frac{1}{2} - x\right)}\right)} \]
    8. lift-/.f64N/A

      \[\leadsto {\left(x + y\right)}^{\left(y - \left(\color{blue}{\frac{1}{2}} - x\right)\right)} \]
    9. metadata-eval96.6%

      \[\leadsto {\left(x + y\right)}^{\left(y - \left(\color{blue}{0.5} - x\right)\right)} \]
  3. Applied rewrites — 96.6%

    \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(y - \left(0.5 - x\right)\right)}} \]
  4. Step-by-step derivation
    1. lift--.f64N/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(y - \left(\frac{1}{2} - x\right)\right)}} \]
    2. sub-flipN/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(y + \left(\mathsf{neg}\left(\left(\frac{1}{2} - x\right)\right)\right)\right)}} \]
    3. lift--.f64N/A

      \[\leadsto {\left(x + y\right)}^{\left(y + \left(\mathsf{neg}\left(\color{blue}{\left(\frac{1}{2} - x\right)}\right)\right)\right)} \]
    4. sub-negate-revN/A

      \[\leadsto {\left(x + y\right)}^{\left(y + \color{blue}{\left(x - \frac{1}{2}\right)}\right)} \]
    5. lift--.f64N/A

      \[\leadsto {\left(x + y\right)}^{\left(y + \color{blue}{\left(x - \frac{1}{2}\right)}\right)} \]
    6. +-commutativeN/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(\left(x - \frac{1}{2}\right) + y\right)}} \]
    7. lift--.f64N/A

      \[\leadsto {\left(x + y\right)}^{\left(\color{blue}{\left(x - \frac{1}{2}\right)} + y\right)} \]
    8. associate--r-N/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(x - \left(\frac{1}{2} - y\right)\right)}} \]
    9. lift--.f64N/A

      \[\leadsto {\left(x + y\right)}^{\left(x - \color{blue}{\left(\frac{1}{2} - y\right)}\right)} \]
    10. sub-to-mult-revN/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(\left(1 - \frac{\frac{1}{2} - y}{x}\right) \cdot x\right)}} \]
    11. lift-/.f64N/A

      \[\leadsto {\left(x + y\right)}^{\left(\left(1 - \color{blue}{\frac{\frac{1}{2} - y}{x}}\right) \cdot x\right)} \]
    12. lift--.f64N/A

      \[\leadsto {\left(x + y\right)}^{\left(\color{blue}{\left(1 - \frac{\frac{1}{2} - y}{x}\right)} \cdot x\right)} \]
    13. *-commutativeN/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(x \cdot \left(1 - \frac{\frac{1}{2} - y}{x}\right)\right)}} \]
    14. lift--.f64N/A

      \[\leadsto {\left(x + y\right)}^{\left(x \cdot \color{blue}{\left(1 - \frac{\frac{1}{2} - y}{x}\right)}\right)} \]
    15. lift-/.f64N/A

      \[\leadsto {\left(x + y\right)}^{\left(x \cdot \left(1 - \color{blue}{\frac{\frac{1}{2} - y}{x}}\right)\right)} \]
    16. lift--.f64N/A

      \[\leadsto {\left(x + y\right)}^{\left(x \cdot \left(1 - \frac{\color{blue}{\frac{1}{2} - y}}{x}\right)\right)} \]
    17. div-subN/A

      \[\leadsto {\left(x + y\right)}^{\left(x \cdot \left(1 - \color{blue}{\left(\frac{\frac{1}{2}}{x} - \frac{y}{x}\right)}\right)\right)} \]
    18. associate--r-N/A

      \[\leadsto {\left(x + y\right)}^{\left(x \cdot \color{blue}{\left(\left(1 - \frac{\frac{1}{2}}{x}\right) + \frac{y}{x}\right)}\right)} \]
    19. distribute-rgt-inN/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(\left(1 - \frac{\frac{1}{2}}{x}\right) \cdot x + \frac{y}{x} \cdot x\right)}} \]
    20. lower-+.f64N/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(\left(1 - \frac{\frac{1}{2}}{x}\right) \cdot x + \frac{y}{x} \cdot x\right)}} \]
  5. Applied rewrites — 96.6%

    \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(\left(x - 0.5\right) + \frac{y}{x} \cdot x\right)}} \]
  6. Step-by-step derivation
    1. lift-+.f64N/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(\left(x - \frac{1}{2}\right) + \frac{y}{x} \cdot x\right)}} \]
    2. +-commutativeN/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(\frac{y}{x} \cdot x + \left(x - \frac{1}{2}\right)\right)}} \]
    3. add-flipN/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(\frac{y}{x} \cdot x - \left(\mathsf{neg}\left(\left(x - \frac{1}{2}\right)\right)\right)\right)}} \]
    4. lift-*.f64N/A

      \[\leadsto {\left(x + y\right)}^{\left(\color{blue}{\frac{y}{x} \cdot x} - \left(\mathsf{neg}\left(\left(x - \frac{1}{2}\right)\right)\right)\right)} \]
    5. lift-/.f64N/A

      \[\leadsto {\left(x + y\right)}^{\left(\color{blue}{\frac{y}{x}} \cdot x - \left(\mathsf{neg}\left(\left(x - \frac{1}{2}\right)\right)\right)\right)} \]
    6. associate-*l/N/A

      \[\leadsto {\left(x + y\right)}^{\left(\color{blue}{\frac{y \cdot x}{x}} - \left(\mathsf{neg}\left(\left(x - \frac{1}{2}\right)\right)\right)\right)} \]
    7. associate-/l*N/A

      \[\leadsto {\left(x + y\right)}^{\left(\color{blue}{y \cdot \frac{x}{x}} - \left(\mathsf{neg}\left(\left(x - \frac{1}{2}\right)\right)\right)\right)} \]
    8. *-inversesN/A

      \[\leadsto {\left(x + y\right)}^{\left(y \cdot \color{blue}{1} - \left(\mathsf{neg}\left(\left(x - \frac{1}{2}\right)\right)\right)\right)} \]
    9. *-rgt-identityN/A

      \[\leadsto {\left(x + y\right)}^{\left(\color{blue}{y} - \left(\mathsf{neg}\left(\left(x - \frac{1}{2}\right)\right)\right)\right)} \]
    10. lift--.f64N/A

      \[\leadsto {\left(x + y\right)}^{\left(y - \left(\mathsf{neg}\left(\color{blue}{\left(x - \frac{1}{2}\right)}\right)\right)\right)} \]
    11. sub-negate-revN/A

      \[\leadsto {\left(x + y\right)}^{\left(y - \color{blue}{\left(\frac{1}{2} - x\right)}\right)} \]
    12. associate--r-N/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(\left(y - \frac{1}{2}\right) + x\right)}} \]
    13. lift--.f64N/A

      \[\leadsto {\left(x + y\right)}^{\left(\color{blue}{\left(y - \frac{1}{2}\right)} + x\right)} \]
    14. *-rgt-identityN/A

      \[\leadsto {\left(x + y\right)}^{\left(\color{blue}{\left(y - \frac{1}{2}\right) \cdot 1} + x\right)} \]
    15. metadata-evalN/A

      \[\leadsto {\left(x + y\right)}^{\left(\left(y - \frac{1}{2}\right) \cdot \color{blue}{\left(\frac{1}{2} + \frac{1}{2}\right)} + x\right)} \]
    16. distribute-lft-outN/A

      \[\leadsto {\left(x + y\right)}^{\left(\color{blue}{\left(\left(y - \frac{1}{2}\right) \cdot \frac{1}{2} + \left(y - \frac{1}{2}\right) \cdot \frac{1}{2}\right)} + x\right)} \]
    17. metadata-evalN/A

      \[\leadsto {\left(x + y\right)}^{\left(\left(\left(y - \frac{1}{2}\right) \cdot \color{blue}{\frac{1}{2}} + \left(y - \frac{1}{2}\right) \cdot \frac{1}{2}\right) + x\right)} \]
    18. mult-flipN/A

      \[\leadsto {\left(x + y\right)}^{\left(\left(\color{blue}{\frac{y - \frac{1}{2}}{2}} + \left(y - \frac{1}{2}\right) \cdot \frac{1}{2}\right) + x\right)} \]
    19. lift-/.f64N/A

      \[\leadsto {\left(x + y\right)}^{\left(\left(\color{blue}{\frac{y - \frac{1}{2}}{2}} + \left(y - \frac{1}{2}\right) \cdot \frac{1}{2}\right) + x\right)} \]
    20. metadata-evalN/A

      \[\leadsto {\left(x + y\right)}^{\left(\left(\frac{y - \frac{1}{2}}{2} + \left(y - \frac{1}{2}\right) \cdot \color{blue}{\frac{1}{2}}\right) + x\right)} \]
    21. mult-flipN/A

      \[\leadsto {\left(x + y\right)}^{\left(\left(\frac{y - \frac{1}{2}}{2} + \color{blue}{\frac{y - \frac{1}{2}}{2}}\right) + x\right)} \]
    22. lift-/.f64N/A

      \[\leadsto {\left(x + y\right)}^{\left(\left(\frac{y - \frac{1}{2}}{2} + \color{blue}{\frac{y - \frac{1}{2}}{2}}\right) + x\right)} \]
    23. lift-/.f64N/A

      \[\leadsto {\left(x + y\right)}^{\left(\left(\frac{y - \frac{1}{2}}{2} + \color{blue}{\frac{y - \frac{1}{2}}{2}}\right) + x\right)} \]
    24. lift--.f64N/A

      \[\leadsto {\left(x + y\right)}^{\left(\left(\frac{y - \frac{1}{2}}{2} + \frac{\color{blue}{y - \frac{1}{2}}}{2}\right) + x\right)} \]
    25. div-subN/A

      \[\leadsto {\left(x + y\right)}^{\left(\left(\frac{y - \frac{1}{2}}{2} + \color{blue}{\left(\frac{y}{2} - \frac{\frac{1}{2}}{2}\right)}\right) + x\right)} \]
    26. associate-+r-N/A

      \[\leadsto {\left(x + y\right)}^{\left(\color{blue}{\left(\left(\frac{y - \frac{1}{2}}{2} + \frac{y}{2}\right) - \frac{\frac{1}{2}}{2}\right)} + x\right)} \]
  7. Applied rewrites — 96.6%

    \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(\left(-0.25 + y\right) - \left(0.25 - x\right)\right)}} \]
  8. Add Preprocessing

Alternative 6: 96.6% accurate, 0.8× speedup?

\[{\left(\mathsf{min}\left(x, y\right) + \mathsf{max}\left(x, y\right)\right)}^{\left(\mathsf{min}\left(x, y\right) - \left(0.5 - \mathsf{max}\left(x, y\right)\right)\right)} \]
(FPCore (x y)
  :precision binary64
  (pow (+ (fmin x y) (fmax x y)) (- (fmin x y) (- 0.5 (fmax x y)))))
double code(double x, double y) {
	return pow((fmin(x, y) + fmax(x, y)), (fmin(x, y) - (0.5 - fmax(x, y))));
}
! Computes (fmin(x,y) + fmax(x,y)) ** (fmin(x,y) - (0.5 - fmax(x,y))).
! merge(y, merge(x, min(x, y), y /= y), x /= x) emulates IEEE fmin:
! the x /= x mask is true only when x is NaN (then y is used), y /= y
! likewise falls back to x, and otherwise min(x, y) is taken; the max
! variant follows the same scheme for fmax.
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = (merge(y, merge(x, min(x, y), y /= y), x /= x) + merge(y, merge(x, max(x, y), y /= y), x /= x)) ** (merge(y, merge(x, min(x, y), y /= y), x /= x) - (0.5d0 - merge(y, merge(x, max(x, y), y /= y), x /= x)))
end function
/**
 * (min(x,y) + max(x,y)) ^ (min(x,y) - (0.5 - max(x,y))), an
 * argument-order-insensitive form of (x + y)^((x + y) - 0.5).
 *
 * Fix: Java has no free functions fmin/fmax (the original did not
 * compile); Math.min/Math.max are used instead. The NaN-skipping
 * behavior of C's fmin/fmax is irrelevant here because the spec
 * restricts x and y to [0.001, 100].
 */
public static double code(double x, double y) {
	double lo = Math.min(x, y);
	double hi = Math.max(x, y);
	return Math.pow(lo + hi, lo - (0.5 - hi));
}
def code(x, y):
	"""Return (lo + hi) ** (lo - (0.5 - hi)) where lo/hi = min/max of x, y.

	Fix: the original called fmin/fmax, which are undefined names in
	Python (NameError); the built-ins min/max are used instead. C's
	fmin/fmax additionally skip NaN inputs, but the specification
	restricts x and y to [0.001, 100], so NaN never occurs here.
	"""
	lo = min(x, y)
	hi = max(x, y)
	return math.pow(lo + hi, lo - (0.5 - hi))
# Computes (fmin(x,y) + fmax(x,y)) ^ (fmin(x,y) - (0.5 - fmax(x,y))).
# Each `(x != x) ? y : ((y != y) ? x : min(x, y))` chain emulates C's
# fmin (and the max variant fmax): `v != v` is true only when v is NaN,
# so a NaN input is replaced by the other argument before min/max.
function code(x, y)
	return Float64(((x != x) ? y : ((y != y) ? x : min(x, y))) + ((x != x) ? y : ((y != y) ? x : max(x, y)))) ^ Float64(((x != x) ? y : ((y != y) ? x : min(x, y))) - Float64(0.5 - ((x != x) ? y : ((y != y) ? x : max(x, y)))))
end
% (min + max) ^ (min - (0.5 - max)); order-insensitive form of (x+y)^(x+y-0.5).
function tmp = code(x, y)
	lo = min(x, y);
	hi = max(x, y);
	tmp = (lo + hi) ^ (lo - (0.5 - hi));
end
(* Machine-precision (Min+Max)^(Min - (0.5 - Max)); order-insensitive form. *)
code[x_, y_] := N[Power[N[(N[Min[x, y], $MachinePrecision] + N[Max[x, y], $MachinePrecision]), $MachinePrecision], N[(N[Min[x, y], $MachinePrecision] - N[(0.5 - N[Max[x, y], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]
{\left(\mathsf{min}\left(x, y\right) + \mathsf{max}\left(x, y\right)\right)}^{\left(\mathsf{min}\left(x, y\right) - \left(0.5 - \mathsf{max}\left(x, y\right)\right)\right)}
Derivation
  1. Initial program 96.6%

    \[{\left(x + y\right)}^{\left(\left(x + y\right) - \frac{1}{2}\right)} \]
  2. Step-by-step derivation
    1. lift--.f64N/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(\left(x + y\right) - \frac{1}{2}\right)}} \]
    2. sub-negate-revN/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(\mathsf{neg}\left(\left(\frac{1}{2} - \left(x + y\right)\right)\right)\right)}} \]
    3. lift-+.f64N/A

      \[\leadsto {\left(x + y\right)}^{\left(\mathsf{neg}\left(\left(\frac{1}{2} - \color{blue}{\left(x + y\right)}\right)\right)\right)} \]
    4. associate--r+N/A

      \[\leadsto {\left(x + y\right)}^{\left(\mathsf{neg}\left(\color{blue}{\left(\left(\frac{1}{2} - x\right) - y\right)}\right)\right)} \]
    5. sub-negateN/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(y - \left(\frac{1}{2} - x\right)\right)}} \]
    6. lower--.f64N/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(y - \left(\frac{1}{2} - x\right)\right)}} \]
    7. lower--.f6496.6%

      \[\leadsto {\left(x + y\right)}^{\left(y - \color{blue}{\left(\frac{1}{2} - x\right)}\right)} \]
    8. lift-/.f64N/A

      \[\leadsto {\left(x + y\right)}^{\left(y - \left(\color{blue}{\frac{1}{2}} - x\right)\right)} \]
    9. metadata-eval96.6%

      \[\leadsto {\left(x + y\right)}^{\left(y - \left(\color{blue}{0.5} - x\right)\right)} \]
  3. Applied rewrites — 96.6%

    \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(y - \left(0.5 - x\right)\right)}} \]
  4. Step-by-step derivation
    1. lift--.f64N/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(y - \left(\frac{1}{2} - x\right)\right)}} \]
    2. sub-flipN/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(y + \left(\mathsf{neg}\left(\left(\frac{1}{2} - x\right)\right)\right)\right)}} \]
    3. lift--.f64N/A

      \[\leadsto {\left(x + y\right)}^{\left(y + \left(\mathsf{neg}\left(\color{blue}{\left(\frac{1}{2} - x\right)}\right)\right)\right)} \]
    4. sub-negate-revN/A

      \[\leadsto {\left(x + y\right)}^{\left(y + \color{blue}{\left(x - \frac{1}{2}\right)}\right)} \]
    5. lift--.f64N/A

      \[\leadsto {\left(x + y\right)}^{\left(y + \color{blue}{\left(x - \frac{1}{2}\right)}\right)} \]
    6. +-commutativeN/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(\left(x - \frac{1}{2}\right) + y\right)}} \]
    7. lift--.f64N/A

      \[\leadsto {\left(x + y\right)}^{\left(\color{blue}{\left(x - \frac{1}{2}\right)} + y\right)} \]
    8. associate--r-N/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(x - \left(\frac{1}{2} - y\right)\right)}} \]
    9. lift--.f64N/A

      \[\leadsto {\left(x + y\right)}^{\left(x - \color{blue}{\left(\frac{1}{2} - y\right)}\right)} \]
    10. lower--.f6496.6%

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(x - \left(0.5 - y\right)\right)}} \]
  5. Applied rewrites — 96.6%

    \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(x - \left(0.5 - y\right)\right)}} \]
  6. Add Preprocessing

Alternative 7: 23.4% accurate, 0.9× speedup?

\[{\left(\mathsf{min}\left(x, y\right) + \mathsf{max}\left(x, y\right)\right)}^{\left(\mathsf{max}\left(x, y\right) - 0.5\right)} \]
(FPCore (x y)
  :precision binary64
  (pow (+ (fmin x y) (fmax x y)) (- (fmax x y) 0.5)))
double code(double x, double y) {
	return pow((fmin(x, y) + fmax(x, y)), (fmax(x, y) - 0.5));
}
! Computes (fmin(x,y) + fmax(x,y)) ** (fmax(x,y) - 0.5).
! merge(y, merge(x, min(x, y), y /= y), x /= x) emulates IEEE fmin:
! x /= x is true only for NaN x (then y is used), y /= y falls back
! to x, otherwise min(x, y); the max variant likewise emulates fmax.
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = (merge(y, merge(x, min(x, y), y /= y), x /= x) + merge(y, merge(x, max(x, y), y /= y), x /= x)) ** (merge(y, merge(x, max(x, y), y /= y), x /= x) - 0.5d0)
end function
/**
 * (min(x,y) + max(x,y)) ^ (max(x,y) - 0.5), a truncated form of the
 * original expression.
 *
 * Fix: Java has no free functions fmin/fmax (the original did not
 * compile); Math.min/Math.max are used instead. The NaN-skipping
 * behavior of C's fmin/fmax is irrelevant here because the spec
 * restricts x and y to [0.001, 100].
 */
public static double code(double x, double y) {
	double lo = Math.min(x, y);
	double hi = Math.max(x, y);
	return Math.pow(lo + hi, hi - 0.5);
}
def code(x, y):
	"""Return (lo + hi) ** (hi - 0.5) where lo/hi = min/max of x, y.

	Fix: the original called fmin/fmax, which are undefined names in
	Python (NameError); the built-ins min/max are used instead. The
	spec restricts x and y to [0.001, 100], so the NaN handling of
	C's fmin/fmax is not needed.
	"""
	lo = min(x, y)
	hi = max(x, y)
	return math.pow(lo + hi, hi - 0.5)
# Computes (fmin(x,y) + fmax(x,y)) ^ (fmax(x,y) - 0.5).
# Each `(v != v) ? other : ...` ternary chain emulates C's fmin/fmax:
# `v != v` is true only when v is NaN, so a NaN input is replaced by
# the other argument before min/max is applied.
function code(x, y)
	return Float64(((x != x) ? y : ((y != y) ? x : min(x, y))) + ((x != x) ? y : ((y != y) ? x : max(x, y)))) ^ Float64(((x != x) ? y : ((y != y) ? x : max(x, y))) - 0.5)
end
% (min + max) ^ (max - 0.5): truncated form of the original expression.
function tmp = code(x, y)
	lo = min(x, y);
	hi = max(x, y);
	tmp = (lo + hi) ^ (hi - 0.5);
end
(* Machine-precision (Min + Max)^(Max - 0.5); truncated form. *)
code[x_, y_] := N[Power[N[(N[Min[x, y], $MachinePrecision] + N[Max[x, y], $MachinePrecision]), $MachinePrecision], N[(N[Max[x, y], $MachinePrecision] - 0.5), $MachinePrecision]], $MachinePrecision]
{\left(\mathsf{min}\left(x, y\right) + \mathsf{max}\left(x, y\right)\right)}^{\left(\mathsf{max}\left(x, y\right) - 0.5\right)}
Derivation
  1. Initial program 96.6%

    \[{\left(x + y\right)}^{\left(\left(x + y\right) - \frac{1}{2}\right)} \]
  2. Taylor expanded in x around 0

    \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(y - \frac{1}{2}\right)}} \]
  3. Step-by-step derivation
    1. metadata-evalN/A

      \[\leadsto {\left(x + y\right)}^{\left(y - \frac{1}{\color{blue}{2}}\right)} \]
    2. lower--.f64N/A

      \[\leadsto {\left(x + y\right)}^{\left(y - \color{blue}{\frac{1}{2}}\right)} \]
    3. metadata-eval19.2%

      \[\leadsto {\left(x + y\right)}^{\left(y - 0.5\right)} \]
  4. Applied rewrites — 19.2%

    \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(y - 0.5\right)}} \]
  5. Add Preprocessing

Alternative 8: 21.9% accurate, 1.1× speedup?

\[{\left(\mathsf{max}\left(x, y\right)\right)}^{\left(\mathsf{max}\left(x, y\right) - 0.5\right)} \]
(FPCore (x y)
  :precision binary64
  (pow (fmax x y) (- (fmax x y) 0.5)))
double code(double x, double y) {
	return pow(fmax(x, y), (fmax(x, y) - 0.5));
}
! Computes fmax(x,y) ** (fmax(x,y) - 0.5).
! merge(y, merge(x, max(x, y), y /= y), x /= x) emulates IEEE fmax:
! x /= x is true only for NaN x (then y is used), y /= y falls back
! to x, and otherwise max(x, y) is taken.
real(8) function code(x, y)
    real(8), intent (in) :: x
    real(8), intent (in) :: y
    code = merge(y, merge(x, max(x, y), y /= y), x /= x) ** (merge(y, merge(x, max(x, y), y /= y), x /= x) - 0.5d0)
end function
/**
 * max(x,y) ^ (max(x,y) - 0.5), the fully truncated form of the
 * original expression.
 *
 * Fix: Java has no free function fmax (the original did not compile);
 * Math.max is used instead. The NaN-skipping behavior of C's fmax is
 * irrelevant here because the spec restricts x and y to [0.001, 100].
 */
public static double code(double x, double y) {
	double hi = Math.max(x, y);
	return Math.pow(hi, hi - 0.5);
}
def code(x, y):
	"""Return hi ** (hi - 0.5) where hi = max(x, y).

	Fix: the original called fmax, which is an undefined name in
	Python (NameError); the built-in max is used instead. The spec
	restricts x and y to [0.001, 100], so the NaN handling of C's
	fmax is not needed.
	"""
	hi = max(x, y)
	return math.pow(hi, hi - 0.5)
# Computes fmax(x,y) ^ (fmax(x,y) - 0.5).
# The `(x != x) ? y : ((y != y) ? x : max(x, y))` chain emulates C's
# fmax: `v != v` is true only when v is NaN, so a NaN input is replaced
# by the other argument before max is applied.
function code(x, y)
	return ((x != x) ? y : ((y != y) ? x : max(x, y))) ^ Float64(((x != x) ? y : ((y != y) ? x : max(x, y))) - 0.5)
end
% max ^ (max - 0.5): fully truncated form of the original expression.
function tmp = code(x, y)
	hi = max(x, y);
	tmp = hi ^ (hi - 0.5);
end
(* Machine-precision Max^(Max - 0.5); fully truncated form. *)
code[x_, y_] := N[Power[N[Max[x, y], $MachinePrecision], N[(N[Max[x, y], $MachinePrecision] - 0.5), $MachinePrecision]], $MachinePrecision]
{\left(\mathsf{max}\left(x, y\right)\right)}^{\left(\mathsf{max}\left(x, y\right) - 0.5\right)}
Derivation
  1. Initial program 96.6%

    \[{\left(x + y\right)}^{\left(\left(x + y\right) - \frac{1}{2}\right)} \]
  2. Step-by-step derivation
    1. lift--.f64N/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(\left(x + y\right) - \frac{1}{2}\right)}} \]
    2. sub-negate-revN/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(\mathsf{neg}\left(\left(\frac{1}{2} - \left(x + y\right)\right)\right)\right)}} \]
    3. lift-+.f64N/A

      \[\leadsto {\left(x + y\right)}^{\left(\mathsf{neg}\left(\left(\frac{1}{2} - \color{blue}{\left(x + y\right)}\right)\right)\right)} \]
    4. associate--r+N/A

      \[\leadsto {\left(x + y\right)}^{\left(\mathsf{neg}\left(\color{blue}{\left(\left(\frac{1}{2} - x\right) - y\right)}\right)\right)} \]
    5. sub-negateN/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(y - \left(\frac{1}{2} - x\right)\right)}} \]
    6. lower--.f64N/A

      \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(y - \left(\frac{1}{2} - x\right)\right)}} \]
    7. lower--.f6496.6%

      \[\leadsto {\left(x + y\right)}^{\left(y - \color{blue}{\left(\frac{1}{2} - x\right)}\right)} \]
    8. lift-/.f64N/A

      \[\leadsto {\left(x + y\right)}^{\left(y - \left(\color{blue}{\frac{1}{2}} - x\right)\right)} \]
    9. metadata-eval96.6%

      \[\leadsto {\left(x + y\right)}^{\left(y - \left(\color{blue}{0.5} - x\right)\right)} \]
  3. Applied rewrites — 96.6%

    \[\leadsto {\left(x + y\right)}^{\color{blue}{\left(y - \left(0.5 - x\right)\right)}} \]
  4. Taylor expanded in x around 0

    \[\leadsto {\left(x + y\right)}^{\left(y - \color{blue}{\frac{1}{2}}\right)} \]
  5. Step-by-step derivation
    1. Applied rewrites — 19.2%

      \[\leadsto {\left(x + y\right)}^{\left(y - \color{blue}{0.5}\right)} \]
    2. Taylor expanded in x around 0

      \[\leadsto {\color{blue}{y}}^{\left(y - 0.5\right)} \]
    3. Step-by-step derivation
      1. Applied rewrites — 17.6%

        \[\leadsto {\color{blue}{y}}^{\left(y - 0.5\right)} \]
      2. Add Preprocessing

      Reproduce

      ?
      herbie shell --seed 1 
      (FPCore (x y)
        :name "pow(x+y,x+y-1/2)"
        :precision binary64
        :pre (and (and (<= 0.001 x) (<= x 100.0)) (and (<= 0.001 y) (<= y 100.0)))
        (pow (+ x y) (- (+ x y) (/ 1.0 2.0))))