\[\left(-1 \leq a \land a \leq 1\right) \land \left(-1 \leq b \land b \leq 1\right)\]
Math FPCore C Fortran Java Python Julia MATLAB Wolfram TeX

\[a + \frac{1}{a - b}\]
↓
\[a + \frac{1}{a - b}\]
(FPCore (a b) :precision binary64 (+ a (/ 1.0 (- a b)))) ↓
(FPCore (a b) :precision binary64 (+ a (/ 1.0 (- a b))))

double code(double a, double b) {
return a + (1.0 / (a - b));
}
↓
double code(double a, double b) {
return a + (1.0 / (a - b));
}
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
code = a + (1.0d0 / (a - b))
end function
↓
real(8) function code(a, b)
real(8), intent (in) :: a
real(8), intent (in) :: b
code = a + (1.0d0 / (a - b))
end function
public static double code(double a, double b) {
return a + (1.0 / (a - b));
}
↓
public static double code(double a, double b) {
return a + (1.0 / (a - b));
}
def code(a, b):
return a + (1.0 / (a - b))
↓
def code(a, b):
return a + (1.0 / (a - b))
function code(a, b)
return Float64(a + Float64(1.0 / Float64(a - b)))
end
↓
function code(a, b)
return Float64(a + Float64(1.0 / Float64(a - b)))
end
function tmp = code(a, b)
tmp = a + (1.0 / (a - b));
end
↓
function tmp = code(a, b)
tmp = a + (1.0 / (a - b));
end
code[a_, b_] := N[(a + N[(1.0 / N[(a - b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
↓
code[a_, b_] := N[(a + N[(1.0 / N[(a - b), $MachinePrecision]), $MachinePrecision]), $MachinePrecision]
a + \frac{1}{a - b}
↓
a + \frac{1}{a - b}