LOTUS (Law Of The Unconscious Statistician)
1-D version
\[ \small{ \begin{aligned} \text{E}\big[g(X)\big]= \begin{cases} \displaystyle \sum_x g(x) \cdot p_X(x), & \text{if $X$ is discrete,} \\ \displaystyle \int_{-\infty}^{+\infty} g(x) \cdot f_X(x) \, dx, & \text{if $X$ is continuous.} \\ \end{cases} \end{aligned} } \]
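A minimal numerical sketch of the discrete case, checked against a Monte Carlo estimate; the fair die and \(g(x)=x^2\) are illustrative choices, not part of the statement above.

```python
import numpy as np

# Support and pmf of a fair six-sided die; g(x) = x^2.
# Both the distribution and g are illustrative choices.
x = np.arange(1, 7)
p = np.full(6, 1 / 6)
g = lambda v: v ** 2

# LOTUS: E[g(X)] = sum_x g(x) * p_X(x)
lotus = np.sum(g(x) * p)

# Monte Carlo estimate of E[g(X)] for comparison
rng = np.random.default_rng(0)
mc = g(rng.choice(x, size=200_000, p=p)).mean()

print(lotus, mc)   # both close to 91/6 ≈ 15.17
```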
2-D version
\[ \small{ \begin{aligned} \text{E}\big[g(X, Y)\big]= \begin{cases} \displaystyle \sum_x \sum_y g(x, y) \cdot p_{X, Y}(x, y), & \text{if discrete,} \\ \displaystyle \int_{-\infty}^{+\infty} \int_{-\infty}^{+\infty} g(x, y) \cdot f_{X, Y}(x, y) \, dx \, dy, & \text{if continuous.} \\ \end{cases} \end{aligned} } \]
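The same idea in 2-D, sketched with a small hand-picked joint pmf and \(g(x, y)=\max(x, y)\); both the table and \(g\) are illustrative assumptions.

```python
import numpy as np

xs = np.array([0, 1])
ys = np.array([0, 1, 2])

# Hand-picked joint pmf p_{X,Y}(x, y): rows indexed by x, columns by y.
p = np.array([[0.10, 0.20, 0.10],
              [0.30, 0.15, 0.15]])
assert np.isclose(p.sum(), 1.0)

# Illustrative g(x, y) = max(x, y)
X, Y = np.meshgrid(xs, ys, indexing="ij")
e_g = np.sum(np.maximum(X, Y) * p)   # sum_x sum_y g(x, y) p_{X,Y}(x, y)
print(e_g)                           # 1.15 for this table
```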
If \(X\) and \(Y\) are independent
\[ \text{E}[X \cdot Y]=\text{E}[X] \cdot \text{E}[Y] \]
If two RVs are independent, the expected value of their product equals the product of their expected values. The proof below treats the discrete case; the continuous case is analogous, with integrals in place of sums.
\[ \begin{aligned} \text{E}[XY] &= \sum_x \sum_y xy\, p_{X, Y}(x, y) \;\;\;\;\;\;\;\; \color{gray}{\leftarrow\text{by 2-D LOTUS, with $g(x, y)=xy$}}\\ &= \sum_x \sum_y xy\, p_{X}(x) p_{Y}(y) \;\;\;\;\;\; \color{gray}{\leftarrow\text{by independence}}\\ &= \sum_x \big(x p_{X}(x) \sum_y y p_{Y}(y)\big) \;\; \color{gray}{\leftarrow\text{factor $x p_{X}(x)$ out of the inner sum}} \\ &= \sum_x x p_{X}(x) \cdot \sum_y y p_{Y}(y) \;\;\;\; \color{gray}{\leftarrow\text{factor out $\sum_y y p_{Y}(y)$, which does not depend on $x$}} \\ &= \text{E}[X] \cdot \text{E}[Y] \;\;\;\;\;\;\;\; \color{gray}{\leftarrow\text{by definition of expected value}} \\ \end{aligned} \]
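A quick simulation check of the product rule, plus a dependent counterexample; the die and exponential distributions below are arbitrary illustrative choices.

```python
import numpy as np

rng = np.random.default_rng(1)
n = 500_000
X = rng.integers(1, 7, size=n)            # fair die, E[X] = 3.5
Y = rng.exponential(scale=2.0, size=n)    # E[Y] = 2, independent of X

# Under independence both sides agree (≈ 7 here).
print((X * Y).mean(), X.mean() * Y.mean())

# Dependence breaks the factorization: take Y = X, so E[XY] = E[X^2].
# E[X^2] = 91/6 ≈ 15.17 while (E[X])^2 = 12.25.
print((X * X).mean(), X.mean() ** 2)
```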
For any two RVs \(X\) and \(Y\), we have
\[ \text{E}[X+Y]=\text{E}[X]+\text{E}[Y] \]
Note that this property does not require \(X\) and \(Y\) to be independent.
More generally, for any \(n\) RVs \(X_1, X_2, \cdots, X_n\),
\[ \small{\text{E}[X_1+X_2+\cdots+X_n]=\text{E}[X_1]+\text{E}[X_2] + \cdots + \text{E}[X_n]} \]
The proof for two discrete RVs (the general \(n\) case follows by induction, and the continuous case replaces sums with integrals):
\[ \small{ \begin{aligned} \text{E}[X+Y] &= \sum_x \sum_y (x+y)\, p_{X, Y}(x, y) \;\;\;\; \color{gray}{\leftarrow\text{by 2-D LOTUS, with $g(x, y)=x+y$}} \\ &= \sum_x \sum_y \big(x\, p_{X, Y}(x, y)+y\, p_{X, Y}(x, y) \big) \\ &= \sum_x \sum_y x\, p_{X, Y}(x, y)+ \sum_y \sum_x y\, p_{X, Y}(x, y) \\ &= \sum_x x\sum_y p_{X, Y}(x, y)+ \sum_y y\sum_x p_{X, Y}(x, y) \\ &= \sum_x x\, p_{X}(x)+ \sum_y y\, p_{Y}(y) \;\; \color{gray}{\leftarrow\text{by def. of marginal pmf}} \\ &=\text{E}[X] + \text{E}[Y] \hspace{2em} \color{gray}{\leftarrow\text{by def. of expected value}} \\ \end{aligned} } \]
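A sketch showing linearity holds even under strong dependence; here \(Y = X^2\), an illustrative choice that makes \(Y\) fully determined by \(X\).

```python
import numpy as np

rng = np.random.default_rng(2)
X = rng.integers(1, 7, size=500_000)   # fair die
Y = X ** 2                             # Y is fully determined by X

# Linearity holds despite the dependence:
# both sides ≈ 3.5 + 91/6 ≈ 18.67.
print((X + Y).mean(), X.mean() + Y.mean())
```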
If \(X\) and \(Y\) are independent, then
\[ \text{var}(X + Y)=\text{var}(X) + \text{var}(Y) \]
Generally, if \(n\) RVs \(X_1, X_2, \cdots, X_n\) are independent,
\[ \text{var}\bigg(\sum_{i=1}^n X_i\bigg)=\sum_{i=1}^n \text{var}(X_i) \]
\[ \begin{aligned} \text{var}(X + Y)&=\text{E}\big[(X+Y)^2\big]-\big(\text{E}[X+Y]\big)^2 \\ \\ \text{E}\big[(X+Y)^2\big]&=\text{E}[X^2+2XY+Y^2] \\ \\ &=\text{E}[X^2]+2\text{E}[XY]+\text{E}[Y^2] \;\;\;\;\color{gray}{\leftarrow\text{by linearity}} \\ \\ &=\text{E}[X^2]+2\text{E}[X]\text{E}[Y]+\text{E}[Y^2] \;\color{gray}{\leftarrow\text{by independence}} \\ \end{aligned} \]
\[ \begin{aligned} \big(\text{E}[X+Y]\big)^2&=\big(\text{E}[X]+\text{E}[Y]\big)^2 \;\;\;\;\color{gray}{\leftarrow\text{by linearity}} \\ \\ &=\big(\text{E}[X]\big)^2+2\text{E}[X]\text{E}[Y]+\big(\text{E}[Y]\big)^2 \\ \\ \text{var}(X + Y)&=\text{E}\big[X^2\big]-\big(\text{E}[X]\big)^2 + \text{E}\big[Y^2\big] - \big(\text{E}[Y]\big)^2 \;\;\color{gray}{\leftarrow\text{the $2\text{E}[X]\text{E}[Y]$ terms cancel}} \\ \\ &=\text{var}(X) + \text{var}(Y) \\ \end{aligned} \]
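A numerical sketch of variance additivity, with a dependent counterexample; the normal distributions are arbitrary illustrative choices.

```python
import numpy as np

rng = np.random.default_rng(3)
n = 500_000
X = rng.normal(0.0, 2.0, size=n)   # var(X) = 4
Y = rng.normal(1.0, 3.0, size=n)   # var(Y) = 9, independent of X

# Independent: both sides ≈ 13.
print(np.var(X + Y), np.var(X) + np.var(Y))

# Dependent counterexample: var(X + X) = 4 var(X), not 2 var(X).
print(np.var(X + X), 2 * np.var(X))   # ≈ 16 vs ≈ 8
```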
If \(X\) and \(Y\) are independent, then
\[ \begin{aligned} \text{var}(X + Y)&=\text{var}(X) + \text{var}(Y) \\ \\ \text{var}(X - Y)&=\;? \\ \end{aligned} \]
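To check your answer, here is a minimal Monte Carlo sketch; the normal distributions are arbitrary illustrative choices.

```python
import numpy as np

rng = np.random.default_rng(4)
n = 500_000
X = rng.normal(0.0, 2.0, size=n)   # var(X) = 4
Y = rng.normal(1.0, 3.0, size=n)   # var(Y) = 9, independent of X

print(np.var(X - Y))   # compare against var(X) and var(Y)
```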