(exp(k1*dx) - 1)/k1

Percentage Accurate: 32.0% → 100.0%
Time: 1.6s
Alternatives: 3
Speedup: 21.4×

Specification

?
\[\left(-1.79 \cdot 10^{+308} \leq k1 \land k1 \leq 1.79 \cdot 10^{+308}\right) \land \left(0 \leq dx \land dx \leq 1000000000\right)\]
\[\frac{e^{k1 \cdot dx} - 1}{k1} \]
; Specification: (exp(k1*dx) - 1) / k1, evaluated in double precision.
(FPCore (k1 dx)
  :precision binary64
  (/ (- (exp (* k1 dx)) 1.0) k1))
double code(double k1, double dx) {
	return (exp((k1 * dx)) - 1.0) / k1;
}
! Computes (exp(k1*dx) - 1) / k1 in double precision.
real(8) function code(k1, dx)
    real(8), intent (in) :: k1
    real(8), intent (in) :: dx
    code = (exp((k1 * dx)) - 1.0d0) / k1
end function
// Computes (exp(k1*dx) - 1) / k1 in double precision.
public static double code(double k1, double dx) {
	return (Math.exp((k1 * dx)) - 1.0) / k1;
}
def code(k1, dx):
	"""Evaluate (exp(k1*dx) - 1) / k1 directly in double precision."""
	numerator = math.exp(k1 * dx) - 1.0
	return numerator / k1
# Computes (exp(k1*dx) - 1) / k1 in Float64.
function code(k1, dx)
	return Float64(Float64(exp(Float64(k1 * dx)) - 1.0) / k1)
end
% Computes (exp(k1*dx) - 1) / k1 in double precision.
function tmp = code(k1, dx)
	tmp = (exp((k1 * dx)) - 1.0) / k1;
end
code[k1_, dx_] := N[(N[(N[Exp[N[(k1 * dx), $MachinePrecision]], $MachinePrecision] - 1.0), $MachinePrecision] / k1), $MachinePrecision]
\frac{e^{k1 \cdot dx} - 1}{k1}

Local Percentage Accuracy vs ?

The average percentage accuracy by input value. The horizontal axis shows the value of an input variable; the variable is chosen in the title. The vertical axis is accuracy; higher is better. Red represents the original program, while blue represents Herbie's suggestion. These can be toggled with the buttons below the plot. The line is an average, while dots represent individual samples.

Accuracy vs Speed?

Herbie found 3 alternatives:

AlternativeAccuracySpeedup
The accuracy (vertical axis) and speed (horizontal axis) of each alternative. Up and to the right is better. The red square shows the initial program, and each blue circle shows an alternative. The line shows the best available speed-accuracy tradeoffs.

Initial Program: 32.0% accurate, 1.0× speedup?

\[\frac{e^{k1 \cdot dx} - 1}{k1} \]
; Initial program: (exp(k1*dx) - 1) / k1 in double precision.
(FPCore (k1 dx)
  :precision binary64
  (/ (- (exp (* k1 dx)) 1.0) k1))
// Naive (exp(k1*dx) - 1) / k1; loses accuracy when k1*dx is near 0.
double code(double k1, double dx) {
	return (exp((k1 * dx)) - 1.0) / k1;
}
! Naive (exp(k1*dx) - 1) / k1; loses accuracy when k1*dx is near 0.
real(8) function code(k1, dx)
    real(8), intent (in) :: k1
    real(8), intent (in) :: dx
    code = (exp((k1 * dx)) - 1.0d0) / k1
end function
// Naive (exp(k1*dx) - 1) / k1; loses accuracy when k1*dx is near 0.
public static double code(double k1, double dx) {
	return (Math.exp((k1 * dx)) - 1.0) / k1;
}
def code(k1, dx):
	"""Naive (exp(k1*dx) - 1) / k1; loses accuracy when k1*dx is near 0."""
	return (math.exp((k1 * dx)) - 1.0) / k1
# Naive (exp(k1*dx) - 1) / k1 in Float64; loses accuracy when k1*dx is near 0.
function code(k1, dx)
	return Float64(Float64(exp(Float64(k1 * dx)) - 1.0) / k1)
end
% Naive (exp(k1*dx) - 1) / k1; loses accuracy when k1*dx is near 0.
function tmp = code(k1, dx)
	tmp = (exp((k1 * dx)) - 1.0) / k1;
end
code[k1_, dx_] := N[(N[(N[Exp[N[(k1 * dx), $MachinePrecision]], $MachinePrecision] - 1.0), $MachinePrecision] / k1), $MachinePrecision]
\frac{e^{k1 \cdot dx} - 1}{k1}

Alternative 1: 100.0% accurate, 0.5× speedup?

\[\begin{array}{l} \mathbf{if}\;\frac{e^{k1 \cdot dx} - 1}{k1} \leq 0:\\ \;\;\;\;dx\\ \mathbf{else}:\\ \;\;\;\;\frac{\mathsf{expm1}\left(dx \cdot k1\right)}{k1}\\ \end{array} \]
; Piecewise: dx (the k1 -> 0 Taylor limit) when the original expression
; is <= 0, otherwise expm1(dx*k1)/k1 to avoid cancellation in exp(x) - 1.
(FPCore (k1 dx)
  :precision binary64
  (if (<= (/ (- (exp (* k1 dx)) 1.0) k1) 0.0)
  dx
  (/ (expm1 (* dx k1)) k1)))
double code(double k1, double dx) {
	double tmp;
	if (((exp((k1 * dx)) - 1.0) / k1) <= 0.0) {
		tmp = dx;
	} else {
		tmp = expm1((dx * k1)) / k1;
	}
	return tmp;
}
// expm1 avoids the cancellation in exp(x) - 1 near x = 0; returns dx
// (the k1 -> 0 Taylor limit) when the original expression evaluates <= 0.
public static double code(double k1, double dx) {
	double tmp;
	if (((Math.exp((k1 * dx)) - 1.0) / k1) <= 0.0) {
		tmp = dx;
	} else {
		tmp = Math.expm1((dx * k1)) / k1;
	}
	return tmp;
}
def code(k1, dx):
	"""Accurate (exp(k1*dx) - 1)/k1: expm1 branch, with dx as the Taylor limit."""
	direct = (math.exp(k1 * dx) - 1.0) / k1
	if direct <= 0.0:
		return dx
	return math.expm1(dx * k1) / k1
# expm1 avoids the cancellation in exp(x) - 1 near x = 0; returns dx
# (the k1 -> 0 Taylor limit) when the original expression evaluates <= 0.
function code(k1, dx)
	tmp = 0.0
	if (Float64(Float64(exp(Float64(k1 * dx)) - 1.0) / k1) <= 0.0)
		tmp = dx;
	else
		tmp = Float64(expm1(Float64(dx * k1)) / k1);
	end
	return tmp
end
code[k1_, dx_] := If[LessEqual[N[(N[(N[Exp[N[(k1 * dx), $MachinePrecision]], $MachinePrecision] - 1.0), $MachinePrecision] / k1), $MachinePrecision], 0.0], dx, N[(N[(Exp[N[(dx * k1), $MachinePrecision]] - 1), $MachinePrecision] / k1), $MachinePrecision]]
\begin{array}{l}
\mathbf{if}\;\frac{e^{k1 \cdot dx} - 1}{k1} \leq 0:\\
\;\;\;\;dx\\

\mathbf{else}:\\
\;\;\;\;\frac{\mathsf{expm1}\left(dx \cdot k1\right)}{k1}\\


\end{array}
Derivation
  1. Split input into 2 regimes
  2. if (/.f64 (-.f64 (exp.f64 (*.f64 k1 dx)) #s(literal 1 binary64)) k1) < 0.0

    1. Initial program 32.0%

      \[\frac{e^{k1 \cdot dx} - 1}{k1} \]
    2. Taylor expanded in k1 around 0

      \[\leadsto \color{blue}{dx} \]
    3. Step-by-step derivation
      1. Applied rewrites (73.9%)

        \[\leadsto \color{blue}{dx} \]

      if 0.0 < (/.f64 (-.f64 (exp.f64 (*.f64 k1 dx)) #s(literal 1 binary64)) k1)

      1. Initial program 32.0%

        \[\frac{e^{k1 \cdot dx} - 1}{k1} \]
      2. Step-by-step derivation
        1. lift--.f64 (N/A)

          \[\leadsto \frac{\color{blue}{e^{k1 \cdot dx} - 1}}{k1} \]
        2. lift-exp.f64 (N/A)

          \[\leadsto \frac{\color{blue}{e^{k1 \cdot dx}} - 1}{k1} \]
        3. lower-expm1.f64 (79.2%)

          \[\leadsto \frac{\color{blue}{\mathsf{expm1}\left(k1 \cdot dx\right)}}{k1} \]
        4. lift-*.f64 (N/A)

          \[\leadsto \frac{\mathsf{expm1}\left(\color{blue}{k1 \cdot dx}\right)}{k1} \]
        5. *-commutative (N/A)

          \[\leadsto \frac{\mathsf{expm1}\left(\color{blue}{dx \cdot k1}\right)}{k1} \]
        6. lower-*.f64 (79.2%)

          \[\leadsto \frac{\mathsf{expm1}\left(\color{blue}{dx \cdot k1}\right)}{k1} \]
      3. Applied rewrites (79.2%)

        \[\leadsto \frac{\color{blue}{\mathsf{expm1}\left(dx \cdot k1\right)}}{k1} \]
    4. Recombined 2 regimes into one program.
    5. Add Preprocessing

    Alternative 2: 75.7% accurate, 1.6× speedup?

    \[\frac{1}{\mathsf{fma}\left(-0.5, k1, \frac{1}{dx}\right)} \]
    ; Approximates the ratio as 1 / (1/dx - k1/2), with the sum fused via fma.
    (FPCore (k1 dx)
      :precision binary64
      (/ 1.0 (fma -0.5 k1 (/ 1.0 dx))))
    double code(double k1, double dx) {
    	return 1.0 / fma(-0.5, k1, (1.0 / dx));
    }
    
    # Approximates (exp(k1*dx) - 1)/k1 as 1 / fma(-0.5, k1, 1/dx).
    function code(k1, dx)
    	return Float64(1.0 / fma(-0.5, k1, Float64(1.0 / dx)))
    end
    
    code[k1_, dx_] := N[(1.0 / N[(-0.5 * k1 + N[(1.0 / dx), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
    
    \frac{1}{\mathsf{fma}\left(-0.5, k1, \frac{1}{dx}\right)}
    
    Derivation
    1. Initial program 32.0%

      \[\frac{e^{k1 \cdot dx} - 1}{k1} \]
    2. Taylor expanded in k1 around 0

      \[\leadsto \frac{\color{blue}{1} - 1}{k1} \]
    3. Step-by-step derivation
      1. Applied rewrites (5.3%)

        \[\leadsto \frac{\color{blue}{1} - 1}{k1} \]
      2. Taylor expanded in k1 around 0

        \[\leadsto \frac{\color{blue}{dx \cdot k1}}{k1} \]
      3. Step-by-step derivation
        1. lower-*.f64 (53.1%)

          \[\leadsto \frac{dx \cdot \color{blue}{k1}}{k1} \]
      4. Applied rewrites (53.1%)

        \[\leadsto \frac{\color{blue}{dx \cdot k1}}{k1} \]
      5. Step-by-step derivation
        1. lift-/.f64 (N/A)

          \[\leadsto \color{blue}{\frac{dx \cdot k1}{k1}} \]
        2. div-flip (N/A)

          \[\leadsto \color{blue}{\frac{1}{\frac{k1}{dx \cdot k1}}} \]
        3. lower-unsound-/.f64 (N/A)

          \[\leadsto \color{blue}{\frac{1}{\frac{k1}{dx \cdot k1}}} \]
        4. lower-unsound-/.f64 (53.0%)

          \[\leadsto \frac{1}{\color{blue}{\frac{k1}{dx \cdot k1}}} \]
      6. Applied rewrites (53.0%)

        \[\leadsto \color{blue}{\frac{1}{\frac{k1}{dx \cdot k1}}} \]
      7. Taylor expanded in k1 around 0

        \[\leadsto \frac{1}{\color{blue}{\frac{-1}{2} \cdot k1 + \frac{1}{dx}}} \]
      8. Step-by-step derivation
        1. lower-fma.f64 (N/A)

          \[\leadsto \frac{1}{\mathsf{fma}\left(\frac{-1}{2}, \color{blue}{k1}, \frac{1}{dx}\right)} \]
        2. lower-/.f64 (75.7%)

          \[\leadsto \frac{1}{\mathsf{fma}\left(-0.5, k1, \frac{1}{dx}\right)} \]
      9. Applied rewrites (75.7%)

        \[\leadsto \frac{1}{\color{blue}{\mathsf{fma}\left(-0.5, k1, \frac{1}{dx}\right)}} \]
      10. Add Preprocessing

      Alternative 3: 73.9% accurate, 21.4× speedup?

      \[dx \]
      ; Taylor limit as k1 -> 0: (exp(k1*dx) - 1)/k1 -> dx.
      (FPCore (k1 dx)
        :precision binary64
        dx)
      // Taylor limit as k1 -> 0: the result is just dx.
      double code(double k1, double dx) {
      	return dx;
      }
      
      ! Taylor limit as k1 -> 0: the result is just dx.
      real(8) function code(k1, dx)
          real(8), intent (in) :: k1
          real(8), intent (in) :: dx
          code = dx
      end function
      
      // Taylor limit as k1 -> 0: the result is just dx.
      public static double code(double k1, double dx) {
      	return dx;
      }
      
      def code(k1, dx):
      	"""Taylor limit of (exp(k1*dx) - 1)/k1 as k1 -> 0; ignores k1."""
      	return dx
      
      # Taylor limit as k1 -> 0: the result is just dx.
      function code(k1, dx)
      	return dx
      end
      
      % Taylor limit as k1 -> 0: the result is just dx.
      function tmp = code(k1, dx)
      	tmp = dx;
      end
      
      code[k1_, dx_] := dx
      
      dx
      
      Derivation
      1. Initial program 32.0%

        \[\frac{e^{k1 \cdot dx} - 1}{k1} \]
      2. Taylor expanded in k1 around 0

        \[\leadsto \color{blue}{dx} \]
      3. Step-by-step derivation
        1. Applied rewrites (73.9%)

          \[\leadsto \color{blue}{dx} \]
        2. Add Preprocessing

        Reproduce

        ?
        herbie shell --seed 1 
        ; Input specification used to reproduce this Herbie run (seed 1):
        ; dx restricted to [0, 1e9], k1 spanning the full double range.
        (FPCore (k1 dx)
          :name "(exp(k1*dx) - 1)/k1"
          :precision binary64
          :pre (and (and (<= -1.79e+308 k1) (<= k1 1.79e+308)) (and (<= 0.0 dx) (<= dx 1000000000.0)))
          (/ (- (exp (* k1 dx)) 1.0) k1))