(FPCore (wC Dzm sig v1 Azp kA1 v2 Bzp kA2) :precision binary64 (* (* (* wC Dzm) sig) (+ (* (* v1 Azp) kA1) (* (* v2 Bzp) kA2))))
/* Evaluate wC*Dzm*sig * (v1*Azp*kA1 + v2*Bzp*kA2) in binary64,
 * keeping the exact association order of the original expression. */
double code(double wC, double Dzm, double sig, double v1, double Azp, double kA1, double v2, double Bzp, double kA2) {
    double scale = (wC * Dzm) * sig;   /* left factor: (wC*Dzm)*sig */
    double termA = (v1 * Azp) * kA1;   /* first summand */
    double termB = (v2 * Bzp) * kA2;   /* second summand */
    return scale * (termA + termB);
}
! Evaluate wc*dzm*sig * (v1*azp*ka1 + v2*bzp*ka2) in double precision,
! with the same association order as the reference expression.
real(8) function code(wc, dzm, sig, v1, azp, ka1, v2, bzp, ka2)
    implicit none
    real(8), intent(in) :: wc, dzm, sig   ! common scale factors
    real(8), intent(in) :: v1, azp, ka1   ! inputs of the first summand
    real(8), intent(in) :: v2, bzp, ka2   ! inputs of the second summand
    code = ((wc * dzm) * sig) * (((v1 * azp) * ka1) + ((v2 * bzp) * ka2))
end function
/**
 * Evaluates wC*Dzm*sig * (v1*Azp*kA1 + v2*Bzp*kA2) in double precision,
 * preserving the association order of the original expression.
 */
public static double code(double wC, double Dzm, double sig, double v1, double Azp, double kA1, double v2, double Bzp, double kA2) {
    final double scale = (wC * Dzm) * sig;  // left factor
    final double left  = (v1 * Azp) * kA1;  // first summand
    final double right = (v2 * Bzp) * kA2;  // second summand
    return scale * (left + right);
}
def code(wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2):
    """Evaluate wC*Dzm*sig * (v1*Azp*kA1 + v2*Bzp*kA2).

    Keeps the exact association order of the reference expression:
    ((wC*Dzm)*sig) * (((v1*Azp)*kA1) + ((v2*Bzp)*kA2)).
    """
    scale = (wC * Dzm) * sig
    term_a = (v1 * Azp) * kA1
    term_b = (v2 * Bzp) * kA2
    return scale * (term_a + term_b)
# Evaluate wC*Dzm*sig * (v1*Azp*kA1 + v2*Bzp*kA2), rounding every
# intermediate to Float64 at exactly the same points as the original
# generated expression.
function code(wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2)
    scale = Float64(Float64(wC * Dzm) * sig)
    lhs = Float64(Float64(v1 * Azp) * kA1)
    rhs = Float64(Float64(v2 * Bzp) * kA2)
    return Float64(scale * Float64(lhs + rhs))
end
% Evaluate wC*Dzm*sig * (v1*Azp*kA1 + v2*Bzp*kA2) with the exact
% association order of the reference expression.
function tmp = code(wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2)
    scale = (wC * Dzm) * sig;    % left factor
    lhs = (v1 * Azp) * kA1;      % first summand
    rhs = (v2 * Bzp) * kA2;      % second summand
    tmp = scale * (lhs + rhs);
end
(* Evaluate wC*Dzm*sig*(v1*Azp*kA1 + v2*Bzp*kA2); each intermediate is
   rounded with N[..., $MachinePrecision] to mimic binary64 evaluation
   at every step of the original association order. *)
code[wC_, Dzm_, sig_, v1_, Azp_, kA1_, v2_, Bzp_, kA2_] := N[(N[(N[(wC * Dzm), $MachinePrecision] * sig), $MachinePrecision] * N[(N[(N[(v1 * Azp), $MachinePrecision] * kA1), $MachinePrecision] + N[(N[(v2 * Bzp), $MachinePrecision] * kA2), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l} \\ \left(\left(wC \cdot Dzm\right) \cdot sig\right) \cdot \left(\left(v1 \cdot Azp\right) \cdot kA1 + \left(v2 \cdot Bzp\right) \cdot kA2\right) \end{array}
Sampling outcomes in binary64 precision:
Herbie found 4 alternatives:
Alternative | Accuracy | Speedup |
---|---|---|
(FPCore (wC Dzm sig v1 Azp kA1 v2 Bzp kA2) :precision binary64 (* (* (* wC Dzm) sig) (+ (* (* v1 Azp) kA1) (* (* v2 Bzp) kA2))))
double code(double wC, double Dzm, double sig, double v1, double Azp, double kA1, double v2, double Bzp, double kA2) { return ((wC * Dzm) * sig) * (((v1 * Azp) * kA1) + ((v2 * Bzp) * kA2)); }
real(8) function code(wc, dzm, sig, v1, azp, ka1, v2, bzp, ka2) real(8), intent (in) :: wc real(8), intent (in) :: dzm real(8), intent (in) :: sig real(8), intent (in) :: v1 real(8), intent (in) :: azp real(8), intent (in) :: ka1 real(8), intent (in) :: v2 real(8), intent (in) :: bzp real(8), intent (in) :: ka2 code = ((wc * dzm) * sig) * (((v1 * azp) * ka1) + ((v2 * bzp) * ka2)) end function
public static double code(double wC, double Dzm, double sig, double v1, double Azp, double kA1, double v2, double Bzp, double kA2) { return ((wC * Dzm) * sig) * (((v1 * Azp) * kA1) + ((v2 * Bzp) * kA2)); }
def code(wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2): return ((wC * Dzm) * sig) * (((v1 * Azp) * kA1) + ((v2 * Bzp) * kA2))
function code(wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2) return Float64(Float64(Float64(wC * Dzm) * sig) * Float64(Float64(Float64(v1 * Azp) * kA1) + Float64(Float64(v2 * Bzp) * kA2))) end
function tmp = code(wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2) tmp = ((wC * Dzm) * sig) * (((v1 * Azp) * kA1) + ((v2 * Bzp) * kA2)); end
code[wC_, Dzm_, sig_, v1_, Azp_, kA1_, v2_, Bzp_, kA2_] := N[(N[(N[(wC * Dzm), $MachinePrecision] * sig), $MachinePrecision] * N[(N[(N[(v1 * Azp), $MachinePrecision] * kA1), $MachinePrecision] + N[(N[(v2 * Bzp), $MachinePrecision] * kA2), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
\begin{array}{l} \\ \left(\left(wC \cdot Dzm\right) \cdot sig\right) \cdot \left(\left(v1 \cdot Azp\right) \cdot kA1 + \left(v2 \cdot Bzp\right) \cdot kA2\right) \end{array}
NOTE: wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, and kA2 should be sorted in increasing order before calling this function. (FPCore (wC Dzm sig v1 Azp kA1 v2 Bzp kA2) :precision binary64 (* (* (* sig (fma kA2 (* Bzp v2) (* kA1 (* Azp v1)))) Dzm) wC))
assert(wC < Dzm && Dzm < sig && sig < v1 && v1 < Azp && Azp < kA1 && kA1 < v2 && v2 < Bzp && Bzp < kA2); assert(wC < Dzm && Dzm < sig && sig < v1 && v1 < Azp && Azp < kA1 && kA1 < v2 && v2 < Bzp && Bzp < kA2); double code(double wC, double Dzm, double sig, double v1, double Azp, double kA1, double v2, double Bzp, double kA2) { return ((sig * fma(kA2, (Bzp * v2), (kA1 * (Azp * v1)))) * Dzm) * wC; }
wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2 = sort([wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2]) wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2 = sort([wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2]) function code(wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2) return Float64(Float64(Float64(sig * fma(kA2, Float64(Bzp * v2), Float64(kA1 * Float64(Azp * v1)))) * Dzm) * wC) end
NOTE: wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, and kA2 should be sorted in increasing order before calling this function. NOTE: wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, and kA2 should be sorted in increasing order before calling this function. code[wC_, Dzm_, sig_, v1_, Azp_, kA1_, v2_, Bzp_, kA2_] := N[(N[(N[(sig * N[(kA2 * N[(Bzp * v2), $MachinePrecision] + N[(kA1 * N[(Azp * v1), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]), $MachinePrecision] * Dzm), $MachinePrecision] * wC), $MachinePrecision]
\begin{array}{l} [wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2] = \mathsf{sort}([wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2])\\\\ [wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2] = \mathsf{sort}([wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2])\\ \\ \left(\left(sig \cdot \mathsf{fma}\left(kA2, Bzp \cdot v2, kA1 \cdot \left(Azp \cdot v1\right)\right)\right) \cdot Dzm\right) \cdot wC \end{array}
Initial program 99.2%
lift-*.f64
N/A
lift-*.f64
N/A
associate-*l*
N/A
lift-*.f64
N/A
associate-*l*
N/A
*-commutative
N/A
lower-*.f64
N/A
Applied rewrites 99.2%
NOTE: wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, and kA2 should be sorted in increasing order before calling this function. (FPCore (wC Dzm sig v1 Azp kA1 v2 Bzp kA2) :precision binary64 (* (* sig Dzm) (* (fma (* kA1 Azp) v1 (* (* kA2 v2) Bzp)) wC)))
assert(wC < Dzm && Dzm < sig && sig < v1 && v1 < Azp && Azp < kA1 && kA1 < v2 && v2 < Bzp && Bzp < kA2); assert(wC < Dzm && Dzm < sig && sig < v1 && v1 < Azp && Azp < kA1 && kA1 < v2 && v2 < Bzp && Bzp < kA2); double code(double wC, double Dzm, double sig, double v1, double Azp, double kA1, double v2, double Bzp, double kA2) { return (sig * Dzm) * (fma((kA1 * Azp), v1, ((kA2 * v2) * Bzp)) * wC); }
wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2 = sort([wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2]) wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2 = sort([wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2]) function code(wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2) return Float64(Float64(sig * Dzm) * Float64(fma(Float64(kA1 * Azp), v1, Float64(Float64(kA2 * v2) * Bzp)) * wC)) end
NOTE: wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, and kA2 should be sorted in increasing order before calling this function. NOTE: wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, and kA2 should be sorted in increasing order before calling this function. code[wC_, Dzm_, sig_, v1_, Azp_, kA1_, v2_, Bzp_, kA2_] := N[(N[(sig * Dzm), $MachinePrecision] * N[(N[(N[(kA1 * Azp), $MachinePrecision] * v1 + N[(N[(kA2 * v2), $MachinePrecision] * Bzp), $MachinePrecision]), $MachinePrecision] * wC), $MachinePrecision]), $MachinePrecision]
\begin{array}{l} [wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2] = \mathsf{sort}([wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2])\\\\ [wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2] = \mathsf{sort}([wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2])\\ \\ \left(sig \cdot Dzm\right) \cdot \left(\mathsf{fma}\left(kA1 \cdot Azp, v1, \left(kA2 \cdot v2\right) \cdot Bzp\right) \cdot wC\right) \end{array}
Initial program 99.2%
Taylor expanded in wC around 0
Applied rewrites 99.3%
NOTE: wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, and kA2 should be sorted in increasing order before calling this function. (FPCore (wC Dzm sig v1 Azp kA1 v2 Bzp kA2) :precision binary64 (* (* (* (* Dzm kA1) (* v1 sig)) wC) Azp))
assert(wC < Dzm && Dzm < sig && sig < v1 && v1 < Azp && Azp < kA1 && kA1 < v2 && v2 < Bzp && Bzp < kA2); assert(wC < Dzm && Dzm < sig && sig < v1 && v1 < Azp && Azp < kA1 && kA1 < v2 && v2 < Bzp && Bzp < kA2); double code(double wC, double Dzm, double sig, double v1, double Azp, double kA1, double v2, double Bzp, double kA2) { return (((Dzm * kA1) * (v1 * sig)) * wC) * Azp; }
NOTE: wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, and kA2 should be sorted in increasing order before calling this function. NOTE: wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, and kA2 should be sorted in increasing order before calling this function. real(8) function code(wc, dzm, sig, v1, azp, ka1, v2, bzp, ka2) real(8), intent (in) :: wc real(8), intent (in) :: dzm real(8), intent (in) :: sig real(8), intent (in) :: v1 real(8), intent (in) :: azp real(8), intent (in) :: ka1 real(8), intent (in) :: v2 real(8), intent (in) :: bzp real(8), intent (in) :: ka2 code = (((dzm * ka1) * (v1 * sig)) * wc) * azp end function
assert wC < Dzm && Dzm < sig && sig < v1 && v1 < Azp && Azp < kA1 && kA1 < v2 && v2 < Bzp && Bzp < kA2; assert wC < Dzm && Dzm < sig && sig < v1 && v1 < Azp && Azp < kA1 && kA1 < v2 && v2 < Bzp && Bzp < kA2; public static double code(double wC, double Dzm, double sig, double v1, double Azp, double kA1, double v2, double Bzp, double kA2) { return (((Dzm * kA1) * (v1 * sig)) * wC) * Azp; }
[wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2] = sort([wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2]) [wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2] = sort([wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2]) def code(wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2): return (((Dzm * kA1) * (v1 * sig)) * wC) * Azp
wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2 = sort([wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2]) wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2 = sort([wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2]) function code(wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2) return Float64(Float64(Float64(Float64(Dzm * kA1) * Float64(v1 * sig)) * wC) * Azp) end
wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2 = num2cell(sort([wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2])){:} wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2 = num2cell(sort([wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2])){:} function tmp = code(wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2) tmp = (((Dzm * kA1) * (v1 * sig)) * wC) * Azp; end
NOTE: wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, and kA2 should be sorted in increasing order before calling this function. NOTE: wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, and kA2 should be sorted in increasing order before calling this function. code[wC_, Dzm_, sig_, v1_, Azp_, kA1_, v2_, Bzp_, kA2_] := N[(N[(N[(N[(Dzm * kA1), $MachinePrecision] * N[(v1 * sig), $MachinePrecision]), $MachinePrecision] * wC), $MachinePrecision] * Azp), $MachinePrecision]
\begin{array}{l} [wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2] = \mathsf{sort}([wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2])\\\\ [wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2] = \mathsf{sort}([wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2])\\ \\ \left(\left(\left(Dzm \cdot kA1\right) \cdot \left(v1 \cdot sig\right)\right) \cdot wC\right) \cdot Azp \end{array}
Initial program 99.2%
Taylor expanded in v1 around inf
*-commutative
N/A
lower-*.f64
N/A
*-commutative
N/A
lower-*.f64
N/A
*-commutative
N/A
lower-*.f64
N/A
*-commutative
N/A
lower-*.f64
N/A
*-commutative
N/A
lower-*.f64
85.9
Applied rewrites 85.9%
Applied rewrites 86.0%
NOTE: wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, and kA2 should be sorted in increasing order before calling this function. (FPCore (wC Dzm sig v1 Azp kA1 v2 Bzp kA2) :precision binary64 (* (* (* Dzm Azp) (* kA1 sig)) (* wC v1)))
assert(wC < Dzm && Dzm < sig && sig < v1 && v1 < Azp && Azp < kA1 && kA1 < v2 && v2 < Bzp && Bzp < kA2); assert(wC < Dzm && Dzm < sig && sig < v1 && v1 < Azp && Azp < kA1 && kA1 < v2 && v2 < Bzp && Bzp < kA2); double code(double wC, double Dzm, double sig, double v1, double Azp, double kA1, double v2, double Bzp, double kA2) { return ((Dzm * Azp) * (kA1 * sig)) * (wC * v1); }
NOTE: wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, and kA2 should be sorted in increasing order before calling this function. NOTE: wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, and kA2 should be sorted in increasing order before calling this function. real(8) function code(wc, dzm, sig, v1, azp, ka1, v2, bzp, ka2) real(8), intent (in) :: wc real(8), intent (in) :: dzm real(8), intent (in) :: sig real(8), intent (in) :: v1 real(8), intent (in) :: azp real(8), intent (in) :: ka1 real(8), intent (in) :: v2 real(8), intent (in) :: bzp real(8), intent (in) :: ka2 code = ((dzm * azp) * (ka1 * sig)) * (wc * v1) end function
assert wC < Dzm && Dzm < sig && sig < v1 && v1 < Azp && Azp < kA1 && kA1 < v2 && v2 < Bzp && Bzp < kA2; assert wC < Dzm && Dzm < sig && sig < v1 && v1 < Azp && Azp < kA1 && kA1 < v2 && v2 < Bzp && Bzp < kA2; public static double code(double wC, double Dzm, double sig, double v1, double Azp, double kA1, double v2, double Bzp, double kA2) { return ((Dzm * Azp) * (kA1 * sig)) * (wC * v1); }
[wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2] = sort([wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2]) [wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2] = sort([wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2]) def code(wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2): return ((Dzm * Azp) * (kA1 * sig)) * (wC * v1)
wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2 = sort([wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2]) wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2 = sort([wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2]) function code(wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2) return Float64(Float64(Float64(Dzm * Azp) * Float64(kA1 * sig)) * Float64(wC * v1)) end
wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2 = num2cell(sort([wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2])){:} wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2 = num2cell(sort([wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2])){:} function tmp = code(wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2) tmp = ((Dzm * Azp) * (kA1 * sig)) * (wC * v1); end
NOTE: wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, and kA2 should be sorted in increasing order before calling this function. NOTE: wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, and kA2 should be sorted in increasing order before calling this function. code[wC_, Dzm_, sig_, v1_, Azp_, kA1_, v2_, Bzp_, kA2_] := N[(N[(N[(Dzm * Azp), $MachinePrecision] * N[(kA1 * sig), $MachinePrecision]), $MachinePrecision] * N[(wC * v1), $MachinePrecision]), $MachinePrecision]
\begin{array}{l} [wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2] = \mathsf{sort}([wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2])\\\\ [wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2] = \mathsf{sort}([wC, Dzm, sig, v1, Azp, kA1, v2, Bzp, kA2])\\ \\ \left(\left(Dzm \cdot Azp\right) \cdot \left(kA1 \cdot sig\right)\right) \cdot \left(wC \cdot v1\right) \end{array}
Initial program 99.2%
Taylor expanded in v1 around inf
*-commutative
N/A
lower-*.f64
N/A
*-commutative
N/A
lower-*.f64
N/A
*-commutative
N/A
lower-*.f64
N/A
*-commutative
N/A
lower-*.f64
N/A
*-commutative
N/A
lower-*.f64
85.9
Applied rewrites 85.9%
Applied rewrites 85.9%
herbie shell --seed 1
(FPCore (wC Dzm sig v1 Azp kA1 v2 Bzp kA2)
:name "wC * Dzm * sig * (v1*Azp*kA1 + v2*Bzp*kA2)"
:precision binary64
:pre (and (and (and (and (and (and (and (and (and (<= 0.0001 wC) (<= wC 1.0)) (and (<= 1e-24 Dzm) (<= Dzm 10.0))) (and (<= 1e-24 sig) (<= sig 1.0))) (and (<= 1.0 v1) (<= v1 3.0))) (and (<= 1.0 Azp) (<= Azp 2.0))) (and (<= 1e-24 kA1) (<= kA1 10000000.0))) (and (<= 1e-24 v2) (<= v2 1.0))) (and (<= 1e-24 Bzp) (<= Bzp 1.0))) (and (<= 1e-24 kA2) (<= kA2 10000000.0)))
(* (* (* wC Dzm) sig) (+ (* (* v1 Azp) kA1) (* (* v2 Bzp) kA2))))