a + log(1 - exp(a)/exp(b))

Percentage Accurate: 13.6% → 100.0%
Time: 9.6s
Alternatives: 7
Speedup: 1.5×

Specification

\[\left(-2.2 \cdot 10^{+16} \leq a \land a \leq 1\right) \land \left(-2.2 \cdot 10^{+16} \leq b \land b \leq 1\right)\]
\[\begin{array}{l} \\ a + \log \left(1 - \frac{e^{a}}{e^{b}}\right) \end{array} \]
(FPCore (a b) :precision binary64 (+ a (log (- 1.0 (/ (exp a) (exp b))))))
double code(double a, double b) {
	return a + log((1.0 - (exp(a) / exp(b))));
}
real(8) function code(a, b)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    code = a + log((1.0d0 - (exp(a) / exp(b))))
end function
public static double code(double a, double b) {
	return a + Math.log((1.0 - (Math.exp(a) / Math.exp(b))));
}
def code(a, b):
	return a + math.log((1.0 - (math.exp(a) / math.exp(b))))
function code(a, b)
	return Float64(a + log(Float64(1.0 - Float64(exp(a) / exp(b)))))
end
function tmp = code(a, b)
	tmp = a + log((1.0 - (exp(a) / exp(b))));
end
code[a_, b_] := N[(a + N[Log[N[(1.0 - N[(N[Exp[a], $MachinePrecision] / N[Exp[b], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
a + \log \left(1 - \frac{e^{a}}{e^{b}}\right)
\end{array}
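
The 13.6% initial accuracy is driven largely by the regime where a and b are close: there the quotient e^a/e^b is so near 1 that the subtraction cancels catastrophically, and for very negative a and b both exponentials underflow to zero, so the quotient becomes NaN. A worked instance of the cancellation:

\[\frac{e^{a}}{e^{b}} = e^{a - b} \approx 1 + \left(a - b\right), \qquad \left|a - b\right| \lesssim 2^{-53} \implies \operatorname{fl}\left(1 - \frac{e^{a}}{e^{b}}\right) = 0,\]

so the logarithm receives an argument of exactly zero and returns -∞ even though the exact result is finite.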

Sampling outcomes in binary64 precision:

Local Percentage Accuracy vs Input Variable

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable (the variable is chosen in the title), and the vertical axis shows accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion; the two can be toggled with the buttons below the plot. The line is an average, while the dots represent individual samples.

Accuracy vs Speed

Herbie found 7 alternatives:

The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 13.6% accurate, 1.0× speedup

\[\begin{array}{l} \\ a + \log \left(1 - \frac{e^{a}}{e^{b}}\right) \end{array} \]
(FPCore (a b) :precision binary64 (+ a (log (- 1.0 (/ (exp a) (exp b))))))
double code(double a, double b) {
	return a + log((1.0 - (exp(a) / exp(b))));
}
real(8) function code(a, b)
    real(8), intent (in) :: a
    real(8), intent (in) :: b
    code = a + log((1.0d0 - (exp(a) / exp(b))))
end function
public static double code(double a, double b) {
	return a + Math.log((1.0 - (Math.exp(a) / Math.exp(b))));
}
def code(a, b):
	return a + math.log((1.0 - (math.exp(a) / math.exp(b))))
function code(a, b)
	return Float64(a + log(Float64(1.0 - Float64(exp(a) / exp(b)))))
end
function tmp = code(a, b)
	tmp = a + log((1.0 - (exp(a) / exp(b))));
end
code[a_, b_] := N[(a + N[Log[N[(1.0 - N[(N[Exp[a], $MachinePrecision] / N[Exp[b], $MachinePrecision]), $MachinePrecision]), $MachinePrecision]], $MachinePrecision]), $MachinePrecision]
\begin{array}{l}

\\
a + \log \left(1 - \frac{e^{a}}{e^{b}}\right)
\end{array}

Alternative 1: 100.0% accurate, 1.5× speedup

\[\begin{array}{l} \\ \log \left(-\mathsf{expm1}\left(a - b\right)\right) + a \end{array} \]
(FPCore (a b) :precision binary64 (+ (log (- (expm1 (- a b)))) a))
double code(double a, double b) {
	return log(-expm1((a - b))) + a;
}
public static double code(double a, double b) {
	return Math.log(-Math.expm1((a - b))) + a;
}
def code(a, b):
	return math.log(-math.expm1((a - b))) + a
function code(a, b)
	return Float64(log(Float64(-expm1(Float64(a - b)))) + a)
end
code[a_, b_] := N[(N[Log[(-N[(Exp[N[(a - b), $MachinePrecision]] - 1), $MachinePrecision])], $MachinePrecision] + a), $MachinePrecision]
\begin{array}{l}

\\
\log \left(-\mathsf{expm1}\left(a - b\right)\right) + a
\end{array}
Derivation
  1. Initial program 15.1%

    \[a + \log \left(1 - \frac{e^{a}}{e^{b}}\right) \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. lift--.f64 (N/A)

      \[\leadsto a + \log \color{blue}{\left(1 - \frac{e^{a}}{e^{b}}\right)} \]
    2. sub-neg (N/A)

      \[\leadsto a + \log \color{blue}{\left(1 + \left(\mathsf{neg}\left(\frac{e^{a}}{e^{b}}\right)\right)\right)} \]
    3. +-commutative (N/A)

      \[\leadsto a + \log \color{blue}{\left(\left(\mathsf{neg}\left(\frac{e^{a}}{e^{b}}\right)\right) + 1\right)} \]
    4. neg-sub0 (N/A)

      \[\leadsto a + \log \left(\color{blue}{\left(0 - \frac{e^{a}}{e^{b}}\right)} + 1\right) \]
    5. associate-+l- (N/A)

      \[\leadsto a + \log \color{blue}{\left(0 - \left(\frac{e^{a}}{e^{b}} - 1\right)\right)} \]
    6. lower--.f64 (N/A)

      \[\leadsto a + \log \color{blue}{\left(0 - \left(\frac{e^{a}}{e^{b}} - 1\right)\right)} \]
    7. lift-/.f64 (N/A)

      \[\leadsto a + \log \left(0 - \left(\color{blue}{\frac{e^{a}}{e^{b}}} - 1\right)\right) \]
    8. lift-exp.f64 (N/A)

      \[\leadsto a + \log \left(0 - \left(\frac{\color{blue}{e^{a}}}{e^{b}} - 1\right)\right) \]
    9. lift-exp.f64 (N/A)

      \[\leadsto a + \log \left(0 - \left(\frac{e^{a}}{\color{blue}{e^{b}}} - 1\right)\right) \]
    10. div-exp (N/A)

      \[\leadsto a + \log \left(0 - \left(\color{blue}{e^{a - b}} - 1\right)\right) \]
    11. lower-expm1.f64 (N/A)

      \[\leadsto a + \log \left(0 - \color{blue}{\mathsf{expm1}\left(a - b\right)}\right) \]
    12. lower--.f64 (100.0%)

      \[\leadsto a + \log \left(0 - \mathsf{expm1}\left(\color{blue}{a - b}\right)\right) \]
  4. Applied rewrites (100.0%)

    \[\leadsto a + \log \color{blue}{\left(0 - \mathsf{expm1}\left(a - b\right)\right)} \]
  5. Step-by-step derivation
    1. lift--.f64 (N/A)

      \[\leadsto a + \log \color{blue}{\left(0 - \mathsf{expm1}\left(a - b\right)\right)} \]
    2. sub0-neg (N/A)

      \[\leadsto a + \log \color{blue}{\left(\mathsf{neg}\left(\mathsf{expm1}\left(a - b\right)\right)\right)} \]
    3. lower-neg.f64 (100.0%)

      \[\leadsto a + \log \color{blue}{\left(-\mathsf{expm1}\left(a - b\right)\right)} \]
  6. Applied rewrites (100.0%)

    \[\leadsto a + \log \color{blue}{\left(-\mathsf{expm1}\left(a - b\right)\right)} \]
  7. Final simplification (100.0%)

    \[\leadsto \log \left(-\mathsf{expm1}\left(a - b\right)\right) + a \]
  8. Add Preprocessing
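
The rewrite is easy to check numerically (a minimal sketch; the sample point is illustrative, not one of Herbie's sampled inputs):

import math

a, b = -1e-17, 0.0  # a and b nearly equal: the regime the original handles worst

# Original program: exp(a)/exp(b) rounds to exactly 1.0 in binary64, so the
# argument of log collapses to 0.0. Python's math.log raises on 0.0 where
# C's log(0.0) would return -inf.
try:
    original = a + math.log(1.0 - math.exp(a) / math.exp(b))
except ValueError:
    original = float("-inf")

# Alternative 1: expm1(a - b) preserves the tiny difference between a and b.
rewritten = math.log(-math.expm1(a - b)) + a

print(original)   # -inf: all information lost
print(rewritten)  # ≈ -39.14394658089878, the correctly rounded result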

Alternative 2: 58.2% accurate, 1.5× speedup

\[\begin{array}{l} \\ \begin{array}{l} \mathbf{if}\;b \leq 3.8 \cdot 10^{-18}:\\ \;\;\;\;\log \left(-\mathsf{expm1}\left(a\right)\right) + a\\ \mathbf{else}:\\ \;\;\;\;\mathsf{log1p}\left(-e^{-b}\right) + a\\ \end{array} \end{array} \]
(FPCore (a b)
 :precision binary64
 (if (<= b 3.8e-18) (+ (log (- (expm1 a))) a) (+ (log1p (- (exp (- b)))) a)))
double code(double a, double b) {
	double tmp;
	if (b <= 3.8e-18) {
		tmp = log(-expm1(a)) + a;
	} else {
		tmp = log1p(-exp(-b)) + a;
	}
	return tmp;
}
public static double code(double a, double b) {
	double tmp;
	if (b <= 3.8e-18) {
		tmp = Math.log(-Math.expm1(a)) + a;
	} else {
		tmp = Math.log1p(-Math.exp(-b)) + a;
	}
	return tmp;
}
def code(a, b):
	tmp = 0
	if b <= 3.8e-18:
		tmp = math.log(-math.expm1(a)) + a
	else:
		tmp = math.log1p(-math.exp(-b)) + a
	return tmp
function code(a, b)
	tmp = 0.0
	if (b <= 3.8e-18)
		tmp = Float64(log(Float64(-expm1(a))) + a);
	else
		tmp = Float64(log1p(Float64(-exp(Float64(-b)))) + a);
	end
	return tmp
end
code[a_, b_] := If[LessEqual[b, 3.8e-18], N[(N[Log[(-N[(Exp[a] - 1), $MachinePrecision])], $MachinePrecision] + a), $MachinePrecision], N[(N[Log[1 + (-N[Exp[(-b)], $MachinePrecision])], $MachinePrecision] + a), $MachinePrecision]]
\begin{array}{l}

\\
\begin{array}{l}
\mathbf{if}\;b \leq 3.8 \cdot 10^{-18}:\\
\;\;\;\;\log \left(-\mathsf{expm1}\left(a\right)\right) + a\\

\mathbf{else}:\\
\;\;\;\;\mathsf{log1p}\left(-e^{-b}\right) + a\\


\end{array}
\end{array}
Derivation
  1. Split input into 2 regimes
  2. if b < 3.7999999999999998e-18

    1. Initial program 10.9%

      \[a + \log \left(1 - \frac{e^{a}}{e^{b}}\right) \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. lift--.f64 (N/A)

        \[\leadsto a + \log \color{blue}{\left(1 - \frac{e^{a}}{e^{b}}\right)} \]
      2. sub-neg (N/A)

        \[\leadsto a + \log \color{blue}{\left(1 + \left(\mathsf{neg}\left(\frac{e^{a}}{e^{b}}\right)\right)\right)} \]
      3. +-commutative (N/A)

        \[\leadsto a + \log \color{blue}{\left(\left(\mathsf{neg}\left(\frac{e^{a}}{e^{b}}\right)\right) + 1\right)} \]
      4. neg-sub0 (N/A)

        \[\leadsto a + \log \left(\color{blue}{\left(0 - \frac{e^{a}}{e^{b}}\right)} + 1\right) \]
      5. associate-+l- (N/A)

        \[\leadsto a + \log \color{blue}{\left(0 - \left(\frac{e^{a}}{e^{b}} - 1\right)\right)} \]
      6. lower--.f64 (N/A)

        \[\leadsto a + \log \color{blue}{\left(0 - \left(\frac{e^{a}}{e^{b}} - 1\right)\right)} \]
      7. lift-/.f64 (N/A)

        \[\leadsto a + \log \left(0 - \left(\color{blue}{\frac{e^{a}}{e^{b}}} - 1\right)\right) \]
      8. lift-exp.f64 (N/A)

        \[\leadsto a + \log \left(0 - \left(\frac{\color{blue}{e^{a}}}{e^{b}} - 1\right)\right) \]
      9. lift-exp.f64 (N/A)

        \[\leadsto a + \log \left(0 - \left(\frac{e^{a}}{\color{blue}{e^{b}}} - 1\right)\right) \]
      10. div-exp (N/A)

        \[\leadsto a + \log \left(0 - \left(\color{blue}{e^{a - b}} - 1\right)\right) \]
      11. lower-expm1.f64 (N/A)

        \[\leadsto a + \log \left(0 - \color{blue}{\mathsf{expm1}\left(a - b\right)}\right) \]
      12. lower--.f64 (100.0%)

        \[\leadsto a + \log \left(0 - \mathsf{expm1}\left(\color{blue}{a - b}\right)\right) \]
    4. Applied rewrites (100.0%)

      \[\leadsto a + \log \color{blue}{\left(0 - \mathsf{expm1}\left(a - b\right)\right)} \]
    5. Taylor expanded in b around 0

      \[\leadsto a + \log \left(0 - \color{blue}{\left(e^{a} - 1\right)}\right) \]
    6. Step-by-step derivation
      1. lower-expm1.f64 (57.0%)

        \[\leadsto a + \log \left(0 - \color{blue}{\mathsf{expm1}\left(a\right)}\right) \]
    7. Applied rewrites (57.0%)

      \[\leadsto a + \log \left(0 - \color{blue}{\mathsf{expm1}\left(a\right)}\right) \]

    if 3.7999999999999998e-18 < b

    1. Initial program 66.5%

      \[a + \log \left(1 - \frac{e^{a}}{e^{b}}\right) \]
    2. Add Preprocessing
    3. Step-by-step derivation
      1. lift-log.f64 (N/A)

        \[\leadsto a + \color{blue}{\log \left(1 - \frac{e^{a}}{e^{b}}\right)} \]
      2. lift--.f64 (N/A)

        \[\leadsto a + \log \color{blue}{\left(1 - \frac{e^{a}}{e^{b}}\right)} \]
      3. sub-neg (N/A)

        \[\leadsto a + \log \color{blue}{\left(1 + \left(\mathsf{neg}\left(\frac{e^{a}}{e^{b}}\right)\right)\right)} \]
      4. lower-log1p.f64 (N/A)

        \[\leadsto a + \color{blue}{\mathsf{log1p}\left(\mathsf{neg}\left(\frac{e^{a}}{e^{b}}\right)\right)} \]
      5. lower-neg.f64 (66.5%)

        \[\leadsto a + \mathsf{log1p}\left(\color{blue}{-\frac{e^{a}}{e^{b}}}\right) \]
      6. lift-/.f64 (N/A)

        \[\leadsto a + \mathsf{log1p}\left(-\color{blue}{\frac{e^{a}}{e^{b}}}\right) \]
      7. lift-exp.f64 (N/A)

        \[\leadsto a + \mathsf{log1p}\left(-\frac{\color{blue}{e^{a}}}{e^{b}}\right) \]
      8. lift-exp.f64 (N/A)

        \[\leadsto a + \mathsf{log1p}\left(-\frac{e^{a}}{\color{blue}{e^{b}}}\right) \]
      9. div-exp (N/A)

        \[\leadsto a + \mathsf{log1p}\left(-\color{blue}{e^{a - b}}\right) \]
      10. lower-exp.f64 (N/A)

        \[\leadsto a + \mathsf{log1p}\left(-\color{blue}{e^{a - b}}\right) \]
      11. lower--.f64 (67.3%)

        \[\leadsto a + \mathsf{log1p}\left(-e^{\color{blue}{a - b}}\right) \]
    4. Applied rewrites (67.3%)

      \[\leadsto a + \color{blue}{\mathsf{log1p}\left(-e^{a - b}\right)} \]
    5. Taylor expanded in a around 0

      \[\leadsto a + \mathsf{log1p}\left(-\color{blue}{e^{\mathsf{neg}\left(b\right)}}\right) \]
    6. Step-by-step derivation
      1. lower-exp.f64 (N/A)

        \[\leadsto a + \mathsf{log1p}\left(-\color{blue}{e^{\mathsf{neg}\left(b\right)}}\right) \]
      2. lower-neg.f64 (62.9%)

        \[\leadsto a + \mathsf{log1p}\left(-e^{\color{blue}{-b}}\right) \]
    7. Applied rewrites (62.9%)

      \[\leadsto a + \mathsf{log1p}\left(-\color{blue}{e^{-b}}\right) \]
  3. Recombined 2 regimes into one program.
  4. Final simplification (57.5%)

    \[\leadsto \begin{array}{l} \mathbf{if}\;b \leq 3.8 \cdot 10^{-18}:\\ \;\;\;\;\log \left(-\mathsf{expm1}\left(a\right)\right) + a\\ \mathbf{else}:\\ \;\;\;\;\mathsf{log1p}\left(-e^{-b}\right) + a\\ \end{array} \]
  5. Add Preprocessing
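
The branch mirrors the two Taylor steps in the derivation: the first regime expands in b around 0 (dropping b), the second expands in a around 0 (dropping a); the threshold 3.8e-18 is Herbie's empirically chosen split point. In the limits:

\[b \to 0:\; a + \log \left(-\mathsf{expm1}\left(a - b\right)\right) \to a + \log \left(-\mathsf{expm1}\left(a\right)\right), \qquad a \to 0:\; a + \log \left(-\mathsf{expm1}\left(a - b\right)\right) \to a + \mathsf{log1p}\left(-e^{-b}\right).\]

Each branch is accurate only when the dropped variable really is negligible, which is why this alternative reaches 58.2% rather than 100%.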

Alternative 3: 55.7% accurate, 1.5× speedup

\[\begin{array}{l} \\ \log \left(-\mathsf{expm1}\left(a\right)\right) + a \end{array} \]
(FPCore (a b) :precision binary64 (+ (log (- (expm1 a))) a))
double code(double a, double b) {
	return log(-expm1(a)) + a;
}
public static double code(double a, double b) {
	return Math.log(-Math.expm1(a)) + a;
}
def code(a, b):
	return math.log(-math.expm1(a)) + a
function code(a, b)
	return Float64(log(Float64(-expm1(a))) + a)
end
code[a_, b_] := N[(N[Log[(-N[(Exp[a] - 1), $MachinePrecision])], $MachinePrecision] + a), $MachinePrecision]
\begin{array}{l}

\\
\log \left(-\mathsf{expm1}\left(a\right)\right) + a
\end{array}
Derivation
  1. Initial program 15.1%

    \[a + \log \left(1 - \frac{e^{a}}{e^{b}}\right) \]
  2. Add Preprocessing
  3. Step-by-step derivation
    1. lift--.f64 (N/A)

      \[\leadsto a + \log \color{blue}{\left(1 - \frac{e^{a}}{e^{b}}\right)} \]
    2. sub-neg (N/A)

      \[\leadsto a + \log \color{blue}{\left(1 + \left(\mathsf{neg}\left(\frac{e^{a}}{e^{b}}\right)\right)\right)} \]
    3. +-commutative (N/A)

      \[\leadsto a + \log \color{blue}{\left(\left(\mathsf{neg}\left(\frac{e^{a}}{e^{b}}\right)\right) + 1\right)} \]
    4. neg-sub0 (N/A)

      \[\leadsto a + \log \left(\color{blue}{\left(0 - \frac{e^{a}}{e^{b}}\right)} + 1\right) \]
    5. associate-+l- (N/A)

      \[\leadsto a + \log \color{blue}{\left(0 - \left(\frac{e^{a}}{e^{b}} - 1\right)\right)} \]
    6. lower--.f64 (N/A)

      \[\leadsto a + \log \color{blue}{\left(0 - \left(\frac{e^{a}}{e^{b}} - 1\right)\right)} \]
    7. lift-/.f64 (N/A)

      \[\leadsto a + \log \left(0 - \left(\color{blue}{\frac{e^{a}}{e^{b}}} - 1\right)\right) \]
    8. lift-exp.f64 (N/A)

      \[\leadsto a + \log \left(0 - \left(\frac{\color{blue}{e^{a}}}{e^{b}} - 1\right)\right) \]
    9. lift-exp.f64 (N/A)

      \[\leadsto a + \log \left(0 - \left(\frac{e^{a}}{\color{blue}{e^{b}}} - 1\right)\right) \]
    10. div-exp (N/A)

      \[\leadsto a + \log \left(0 - \left(\color{blue}{e^{a - b}} - 1\right)\right) \]
    11. lower-expm1.f64 (N/A)

      \[\leadsto a + \log \left(0 - \color{blue}{\mathsf{expm1}\left(a - b\right)}\right) \]
    12. lower--.f64 (100.0%)

      \[\leadsto a + \log \left(0 - \mathsf{expm1}\left(\color{blue}{a - b}\right)\right) \]
  4. Applied rewrites (100.0%)

    \[\leadsto a + \log \color{blue}{\left(0 - \mathsf{expm1}\left(a - b\right)\right)} \]
  5. Taylor expanded in b around 0

    \[\leadsto a + \log \left(0 - \color{blue}{\left(e^{a} - 1\right)}\right) \]
  6. Step-by-step derivation
    1. lower-expm1.f64 (54.2%)

      \[\leadsto a + \log \left(0 - \color{blue}{\mathsf{expm1}\left(a\right)}\right) \]
  7. Applied rewrites (54.2%)

    \[\leadsto a + \log \left(0 - \color{blue}{\mathsf{expm1}\left(a\right)}\right) \]
  8. Final simplification (54.2%)

    \[\leadsto \log \left(-\mathsf{expm1}\left(a\right)\right) + a \]
  9. Add Preprocessing
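
Alternative 3 is the small-b branch of Alternative 2 applied unconditionally, so it matches Alternative 1 only when b is negligible. A quick check (a minimal sketch; the helper names and inputs are illustrative):

import math

def alt1(a, b):  # Alternative 1: accurate across the whole input range
    return math.log(-math.expm1(a - b)) + a

def alt3(a, b):  # Alternative 3: b dropped by the Taylor expansion around 0
    return math.log(-math.expm1(a)) + a

print(alt1(-2.0, 1e-20), alt3(-2.0, 1e-20))  # agree while b is negligible
print(alt1(-2.0, 1.0), alt3(-2.0, 1.0))      # diverge once b matters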

Alternative 4: 10.9% accurate, 1.5× speedup

\[\begin{array}{l} \\ \mathsf{log1p}\left(-e^{a}\right) + a \end{array} \]
(FPCore (a b) :precision binary64 (+ (log1p (- (exp a))) a))
double code(double a, double b) {
	return log1p(-exp(a)) + a;
}
public static double code(double a, double b) {
	return Math.log1p(-Math.exp(a)) + a;
}
def code(a, b):
	return math.log1p(-math.exp(a)) + a
function code(a, b)
	return Float64(log1p(Float64(-exp(a))) + a)
end
code[a_, b_] := N[(N[Log[1 + (-N[Exp[a], $MachinePrecision])], $MachinePrecision] + a), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{log1p}\left(-e^{a}\right) + a
\end{array}
Derivation
  1. Initial program 15.1%

    \[a + \log \left(1 - \frac{e^{a}}{e^{b}}\right) \]
  2. Add Preprocessing
  3. Taylor expanded in b around 0

    \[\leadsto a + \color{blue}{\log \left(1 - e^{a}\right)} \]
  4. Step-by-step derivation
    1. sub-neg (N/A)

      \[\leadsto a + \log \color{blue}{\left(1 + \left(\mathsf{neg}\left(e^{a}\right)\right)\right)} \]
    2. lower-log1p.f64 (N/A)

      \[\leadsto a + \color{blue}{\mathsf{log1p}\left(\mathsf{neg}\left(e^{a}\right)\right)} \]
    3. lower-neg.f64 (N/A)

      \[\leadsto a + \mathsf{log1p}\left(\color{blue}{-e^{a}}\right) \]
    4. lower-exp.f64 (11.1%)

      \[\leadsto a + \mathsf{log1p}\left(-\color{blue}{e^{a}}\right) \]
  5. Applied rewrites (11.1%)

    \[\leadsto a + \color{blue}{\mathsf{log1p}\left(-e^{a}\right)} \]
  6. Final simplification (11.1%)

    \[\leadsto \mathsf{log1p}\left(-e^{a}\right) + a \]
  7. Add Preprocessing
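
Alternatives 3 and 4 are mathematically identical, and the accuracy gap between them comes down to where the cancellation happens:

\[\mathsf{log1p}\left(-e^{a}\right) = \log \left(1 - e^{a}\right) = \log \left(-\mathsf{expm1}\left(a\right)\right),\]

but for small |a| the value e^a rounds to a number very close to 1, so forming 1 - e^a inside the log1p argument cancels catastrophically, while expm1(a) computes e^a - 1 directly without that subtraction. Hence Alternative 4's 10.9% against Alternative 3's 55.7%.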

Alternative 5: 8.0% accurate, 2.6× speedup

\[\begin{array}{l} \\ \mathsf{log1p}\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.16666666666666666, a, -0.5\right), a, -1\right), a, -1\right)\right) + a \end{array} \]
(FPCore (a b)
 :precision binary64
 (+ (log1p (fma (fma (fma -0.16666666666666666 a -0.5) a -1.0) a -1.0)) a))
double code(double a, double b) {
	return log1p(fma(fma(fma(-0.16666666666666666, a, -0.5), a, -1.0), a, -1.0)) + a;
}
function code(a, b)
	return Float64(log1p(fma(fma(fma(-0.16666666666666666, a, -0.5), a, -1.0), a, -1.0)) + a)
end
code[a_, b_] := N[(N[Log[1 + N[(N[(N[(-0.16666666666666666 * a + -0.5), $MachinePrecision] * a + -1.0), $MachinePrecision] * a + -1.0), $MachinePrecision]], $MachinePrecision] + a), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{log1p}\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.16666666666666666, a, -0.5\right), a, -1\right), a, -1\right)\right) + a
\end{array}
Derivation
  1. Initial program 15.1%

    \[a + \log \left(1 - \frac{e^{a}}{e^{b}}\right) \]
  2. Add Preprocessing
  3. Taylor expanded in b around 0

    \[\leadsto a + \color{blue}{\log \left(1 - e^{a}\right)} \]
  4. Step-by-step derivation
    1. sub-neg (N/A)

      \[\leadsto a + \log \color{blue}{\left(1 + \left(\mathsf{neg}\left(e^{a}\right)\right)\right)} \]
    2. lower-log1p.f64 (N/A)

      \[\leadsto a + \color{blue}{\mathsf{log1p}\left(\mathsf{neg}\left(e^{a}\right)\right)} \]
    3. lower-neg.f64 (N/A)

      \[\leadsto a + \mathsf{log1p}\left(\color{blue}{-e^{a}}\right) \]
    4. lower-exp.f64 (11.1%)

      \[\leadsto a + \mathsf{log1p}\left(-\color{blue}{e^{a}}\right) \]
  5. Applied rewrites (11.1%)

    \[\leadsto a + \color{blue}{\mathsf{log1p}\left(-e^{a}\right)} \]
  6. Taylor expanded in a around 0

    \[\leadsto a + \mathsf{log1p}\left(a \cdot \left(a \cdot \left(\frac{-1}{6} \cdot a - \frac{1}{2}\right) - 1\right) - 1\right) \]
  7. Step-by-step derivation
  8. Applied rewrites (8.3%)

    \[\leadsto a + \mathsf{log1p}\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.16666666666666666, a, -0.5\right), a, -1\right), a, -1\right)\right) \]
  9. Final simplification (8.3%)

    \[\leadsto \mathsf{log1p}\left(\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-0.16666666666666666, a, -0.5\right), a, -1\right), a, -1\right)\right) + a \]
  10. Add Preprocessing

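The fma chain is the degree-3 Taylor polynomial of -e^a in Horner form, so each fused multiply-add costs a single rounding (a restatement of the Taylor step above):

\[\mathsf{fma}\left(\mathsf{fma}\left(\mathsf{fma}\left(-\tfrac{1}{6}, a, -\tfrac{1}{2}\right), a, -1\right), a, -1\right) = -1 - a - \frac{a^{2}}{2} - \frac{a^{3}}{6} \approx -e^{a},\]

so the log1p argument approximates -e^a and the whole program approximates a + log(1 - e^a); the polynomial is only valid near a = 0, which is why the reported accuracy is just 8.0%.
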
Alternative 6: 7.6% accurate, 3.0× speedup

\[\begin{array}{l} \\ \mathsf{log1p}\left(-1 - a\right) + a \end{array} \]
(FPCore (a b) :precision binary64 (+ (log1p (- -1.0 a)) a))
double code(double a, double b) {
	return log1p((-1.0 - a)) + a;
}
public static double code(double a, double b) {
	return Math.log1p((-1.0 - a)) + a;
}
def code(a, b):
	return math.log1p((-1.0 - a)) + a
function code(a, b)
	return Float64(log1p(Float64(-1.0 - a)) + a)
end
code[a_, b_] := N[(N[Log[1 + N[(-1.0 - a), $MachinePrecision]], $MachinePrecision] + a), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{log1p}\left(-1 - a\right) + a
\end{array}
Derivation
  1. Initial program 15.1%

    \[a + \log \left(1 - \frac{e^{a}}{e^{b}}\right) \]
  2. Add Preprocessing
  3. Taylor expanded in b around 0

    \[\leadsto a + \color{blue}{\log \left(1 - e^{a}\right)} \]
  4. Step-by-step derivation
    1. sub-neg (N/A)

      \[\leadsto a + \log \color{blue}{\left(1 + \left(\mathsf{neg}\left(e^{a}\right)\right)\right)} \]
    2. lower-log1p.f64 (N/A)

      \[\leadsto a + \color{blue}{\mathsf{log1p}\left(\mathsf{neg}\left(e^{a}\right)\right)} \]
    3. lower-neg.f64 (N/A)

      \[\leadsto a + \mathsf{log1p}\left(\color{blue}{-e^{a}}\right) \]
    4. lower-exp.f64 (11.1%)

      \[\leadsto a + \mathsf{log1p}\left(-\color{blue}{e^{a}}\right) \]
  5. Applied rewrites (11.1%)

    \[\leadsto a + \color{blue}{\mathsf{log1p}\left(-e^{a}\right)} \]
  6. Taylor expanded in a around 0

    \[\leadsto a + \mathsf{log1p}\left(-1 \cdot a - 1\right) \]
  7. Step-by-step derivation
  8. Applied rewrites (7.9%)

    \[\leadsto a + \mathsf{log1p}\left(-1 - a\right) \]
  9. Final simplification (7.9%)

    \[\leadsto \mathsf{log1p}\left(-1 - a\right) + a \]
  10. Add Preprocessing

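Since log1p(-1 - a) = log(1 + (-1 - a)) = log(-a), this alternative reduces to

\[\mathsf{log1p}\left(-1 - a\right) + a = \log \left(-a\right) + a,\]

the leading behavior of the expression as a approaches 0 from below.
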
Alternative 7: 3.1% accurate, 3.1× speedup

\[\begin{array}{l} \\ \mathsf{log1p}\left(-1\right) + a \end{array} \]
(FPCore (a b) :precision binary64 (+ (log1p -1.0) a))
double code(double a, double b) {
	return log1p(-1.0) + a;
}
public static double code(double a, double b) {
	return Math.log1p(-1.0) + a;
}
def code(a, b):
	return math.log1p(-1.0) + a
function code(a, b)
	return Float64(log1p(-1.0) + a)
end
code[a_, b_] := N[(N[Log[1 + -1.0], $MachinePrecision] + a), $MachinePrecision]
\begin{array}{l}

\\
\mathsf{log1p}\left(-1\right) + a
\end{array}
Derivation
  1. Initial program 15.1%

    \[a + \log \left(1 - \frac{e^{a}}{e^{b}}\right) \]
  2. Add Preprocessing
  3. Taylor expanded in b around 0

    \[\leadsto a + \color{blue}{\log \left(1 - e^{a}\right)} \]
  4. Step-by-step derivation
    1. sub-neg (N/A)

      \[\leadsto a + \log \color{blue}{\left(1 + \left(\mathsf{neg}\left(e^{a}\right)\right)\right)} \]
    2. lower-log1p.f64 (N/A)

      \[\leadsto a + \color{blue}{\mathsf{log1p}\left(\mathsf{neg}\left(e^{a}\right)\right)} \]
    3. lower-neg.f64 (N/A)

      \[\leadsto a + \mathsf{log1p}\left(\color{blue}{-e^{a}}\right) \]
    4. lower-exp.f64 (11.1%)

      \[\leadsto a + \mathsf{log1p}\left(-\color{blue}{e^{a}}\right) \]
  5. Applied rewrites (11.1%)

    \[\leadsto a + \color{blue}{\mathsf{log1p}\left(-e^{a}\right)} \]
  6. Taylor expanded in a around 0

    \[\leadsto a + \mathsf{log1p}\left(-1\right) \]
  7. Step-by-step derivation
  8. Applied rewrites (3.1%)

    \[\leadsto a + \mathsf{log1p}\left(-1\right) \]
  9. Final simplification (3.1%)

    \[\leadsto \mathsf{log1p}\left(-1\right) + a \]
  10. Add Preprocessing

Reproduce

herbie shell --seed 1
(FPCore (a b)
  :name "a + log(1 - exp(a)/exp(b))"
  :precision binary64
  :pre (and (and (<= -2.2e+16 a) (<= a 1.0)) (and (<= -2.2e+16 b) (<= b 1.0)))
  (+ a (log (- 1.0 (/ (exp a) (exp b))))))
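
The shell reads FPCore expressions from standard input, so one way to rerun this case is to save the FPCore above to a file and pipe it in (the file name example.fpcore is illustrative):

herbie shell --seed 1 < example.fpcore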