diff --git a/minitorch/module.py b/minitorch/module.py
index 0a66058c..a57b1afd 100644
--- a/minitorch/module.py
+++ b/minitorch/module.py
@@ -32,12 +32,18 @@ def modules(self) -> Sequence[Module]:
     def train(self) -> None:
         """Set the mode of this module and all descendent modules to `train`."""
         # TODO: Implement for Task 0.4.
-        raise NotImplementedError("Need to implement for Task 0.4")
+        self.training = True
+        m: Dict[str, Module] = self.__dict__["_modules"]
+        for module in m.values():
+            module.train()
 
     def eval(self) -> None:
         """Set the mode of this module and all descendent modules to `eval`."""
         # TODO: Implement for Task 0.4.
-        raise NotImplementedError("Need to implement for Task 0.4")
+        self.training = False
+        m: Dict[str, Module] = self.__dict__["_modules"]
+        for module in m.values():
+            module.eval()
 
     def named_parameters(self) -> Sequence[Tuple[str, Parameter]]:
         """Collect all the parameters of this module and its descendents.
@@ -48,12 +54,25 @@ def named_parameters(self) -> Sequence[Tuple[str, Parameter]]:
 
         """
         # TODO: Implement for Task 0.4.
-        raise NotImplementedError("Need to implement for Task 0.4")
+        m: Dict[str, Module] = self.__dict__["_modules"]
+        p: Dict[str, Parameter] = self.__dict__["_parameters"]
+        nps = [(name, parameter) for name, parameter in p.items()]
+        for module_name, module in m.items():
+            nps += [
+                (".".join([module_name, name]), parameter)
+                for name, parameter in module.named_parameters()
+            ]
+        return nps
 
     def parameters(self) -> Sequence[Parameter]:
         """Enumerate over all the parameters of this module and its descendents."""
         # TODO: Implement for Task 0.4.
-        raise NotImplementedError("Need to implement for Task 0.4")
+        m: Dict[str, Module] = self.__dict__["_modules"]
+        p: Dict[str, Parameter] = self.__dict__["_parameters"]
+        ps = [parameter for parameter in p.values()]
+        for module in m.values():
+            ps += module.parameters()
+        return ps
 
     def add_parameter(self, k: str, v: Any) -> Parameter:
         """Manually add a parameter. Useful helper for scalar parameters.
diff --git a/minitorch/operators.py b/minitorch/operators.py
index 37cc7c09..134e887d 100644
--- a/minitorch/operators.py
+++ b/minitorch/operators.py
@@ -1,9 +1,11 @@
-"""Collection of the core mathematical operators used throughout the code base."""
+"""
+Collection of the core mathematical operators used throughout the code base.
+"""
 
 import math
 
 # ## Task 0.1
 from typing import Callable, Iterable
 
 #
 # Implementation of a prelude of elementary functions.
@@ -33,6 +35,159 @@
 
 
 # TODO: Implement for Task 0.1.
+def mul(x: float, y: float) -> float:
+    """
+    Scalar multiplication.
+
+    Args:
+        x: A float value
+        y: A float value
+
+    Returns:
+        A float value x multiplied by y
+    """
+    return x * y
+
+
+def id(x: float) -> float:
+    """
+    Identity function.
+
+    Args:
+        x: A float value.
+
+    Returns:
+        The float input unchanged.
+    """
+    return x
+
+
+def add(x: float, y: float) -> float:
+    """
+    Addition function.
+
+    Args:
+        x: A float value.
+        y: A float value.
+
+    Returns:
+        A float value x added to y
+    """
+    return x + y
+
+
+def neg(x: float) -> float:
+    """
+    Negation function.
+
+    Args:
+        x: A float value.
+
+    Returns:
+        A float value x multiplied by -1.0
+    """
+    return -x
+
+
+def lt(x: float, y: float) -> bool:
+    """
+    Compares two float values.
+
+    Args:
+        x: A float value.
+        y: A float value.
+
+    Returns:
+        A boolean value. True if x is less than y.
+    """
+    return x < y
+
+
+def eq(x: float, y: float) -> bool:
+    """
+    Equality function.
+
+    Args:
+        x: A float value.
+        y: A float value.
+
+    Returns:
+        A boolean value. True if x is equal to y.
+    """
+    return x == y
+
+
+def max(x: float, y: float) -> float:
+    """
+    Max function.
+
+    Args:
+        x: A float value.
+        y: A float value.
+
+    Returns:
+        The larger value between x and y.
+    """
+    return x if x > y else y
+
+
+def is_close(
+    x: float, y: float, atol: float = 1e-8, rtol: float = 1e-5
+) -> bool:
+    """
+    Checks whether x is close to y, using the formula from https://pytorch.org/docs/stable/generated/torch.isclose.html.
+
+    Args:
+        x: A float value.
+        y: A float value.
+        atol: Absolute tolerance. Default: 1e-8
+        rtol: Relative tolerance. Default: 1e-5
+
+    Returns:
+        A boolean value indicating if x is close to y
+    """
+    return math.fabs(x - y) <= (atol + rtol * math.fabs(y))
+
+
+def sigmoid(x: float) -> float:
+    """
+    Sigmoid function.
+
+    Args:
+        x: A float value
+
+    Returns:
+        A float value 1 / (1 + e^-x)
+    """
+    return 1.0 / (1.0 + math.exp(-x))
+
+
+def relu(x: float) -> float:
+    return 0.0 if x <= 0.0 else x
+
+
+def log(x: float) -> float:
+    return math.log(x)
+
+
+def exp(x: float) -> float:
+    return math.exp(x)
+
+
+def inv(x: float) -> float:
+    return 1.0 / x
+
+
+def log_back(x: float, y: float) -> float:
+    return y / x  # d(log x)/dx = 1/x, scaled by the second argument y
+
+
+def inv_back(x: float, y: float) -> float:
+    return -y / x**2  # d(1/x)/dx = -1/x^2, scaled by the second argument y
+
+
+def relu_back(x: float, y: float) -> float:
+    return 0.0 if x <= 0 else y  # d(relu x)/dx is 0 for x <= 0 and 1 otherwise
 
 
 # ## Task 0.3
@@ -52,3 +207,37 @@
 
 
 # TODO: Implement for Task 0.3.
+def map(func: Callable[[float], float], xs: Iterable[float]) -> Iterable[float]:
+    return [func(x) for x in xs]
+
+
+def zipWith(
+    func: Callable[[float, float], float], xs: Iterable[float], ys: Iterable[float]
+) -> Iterable[float]:
+    return [func(x, y) for x, y in zip(xs, ys)]
+
+
+def reduce(func: Callable[[float, float], float], xs: Iterable[float]) -> float:
+    items = list(xs)  # materialize so an arbitrary iterable can be indexed
+    if len(items) == 0:
+        return 0.0
+    acc = items[0]
+    for x in items[1:]:
+        acc = func(acc, x)
+    return acc
+
+
+def negList(xs: Iterable[float]) -> Iterable[float]:
+    return map(neg, xs)
+
+
+def addLists(xs: Iterable[float], ys: Iterable[float]) -> Iterable[float]:
+    return zipWith(add, xs, ys)
+
+
+def sum(xs: Iterable[float]) -> float:
+    return reduce(add, xs)
+
+
+def prod(xs: Iterable[float]) -> float:
+    return reduce(mul, xs)
diff --git a/requirements.txt b/requirements.txt
index c9cd8a02..8c2394ad 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,7 +1,7 @@
 colorama==0.4.3
 hypothesis == 6.54
 numba == 0.60
-numpy == 2.0.0
+numpy<2
 pre-commit == 2.20.0
 pytest == 8.3.2
 pytest-env
diff --git a/tests/test_operators.py b/tests/test_operators.py
index f6e555af..b0a5ff1f 100644
--- a/tests/test_operators.py
+++ b/tests/test_operators.py
@@ -105,10 +105,12 @@ def test_sigmoid(a: float) -> None:
     * It is always between 0.0 and 1.0.
     * one minus sigmoid is the same as sigmoid of the negative
     * It crosses 0 at 0.5
     * It is strictly increasing.
     """
     # TODO: Implement for Task 0.2.
-    raise NotImplementedError("Need to implement for Task 0.2")
+    assert 0.0 <= sigmoid(a) and sigmoid(a) <= 1.0
+    assert_close(1 - sigmoid(a), sigmoid(-a))
+    assert_close(sigmoid(0.0), 0.5)
 
 
 @pytest.mark.task0_2
@@ -116,32 +118,36 @@ def test_sigmoid(a: float) -> None:
 def test_transitive(a: float, b: float, c: float) -> None:
     """Test the transitive property of less-than (a < b and b < c implies a < c)"""
     # TODO: Implement for Task 0.2.
-    raise NotImplementedError("Need to implement for Task 0.2")
+    if lt(a, b) and lt(b, c):
+        assert lt(a, c)
 
 
 @pytest.mark.task0_2
-def test_symmetric() -> None:
+@given(small_floats, small_floats)
+def test_symmetric(a: float, b: float) -> None:
     """Write a test that ensures that :func:`minitorch.operators.mul` is symmetric, i.e.
     gives the same value regardless of the order of its input.
     """
     # TODO: Implement for Task 0.2.
-    raise NotImplementedError("Need to implement for Task 0.2")
+    assert_close(mul(a, b), mul(b, a))
 
 
 @pytest.mark.task0_2
-def test_distribute() -> None:
+@given(small_floats, small_floats, small_floats)
+def test_distribute(a: float, b: float, c: float) -> None:
     r"""Write a test that ensures that your operators distribute, i.e.
     :math:`z \times (x + y) = z \times x + z \times y`
     """
     # TODO: Implement for Task 0.2.
-    raise NotImplementedError("Need to implement for Task 0.2")
+    assert_close(mul(c, add(a, b)), add(mul(c, a), mul(c, b)))
 
 
 @pytest.mark.task0_2
-def test_other() -> None:
+@given(small_floats)
+def test_other(a: float) -> None:
     """Write a test that ensures some other property holds for your functions."""
     # TODO: Implement for Task 0.2.
-    raise NotImplementedError("Need to implement for Task 0.2")
+    assert id(a) == a
 
 
 # ## Task 0.3 - Higher-order functions
@@ -169,7 +175,10 @@ def test_sum_distribute(ls1: List[float], ls2: List[float]) -> None:
     is the same as the sum of each element of `ls1` plus each element of `ls2`.
     """
     # TODO: Implement for Task 0.3.
-    raise NotImplementedError("Need to implement for Task 0.3")
+    assert_close(
+        minitorch.operators.sum(addLists(ls1, ls2)),
+        add(minitorch.operators.sum(ls1), minitorch.operators.sum(ls2)),
+    )
 
 
 @pytest.mark.task0_3
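
As a quick, informal sanity check of the behaviour the patch adds (a sketch only, not part of the diff above), the snippet below exercises the recursive train/eval modes, the dotted names returned by named_parameters, and the list helpers built from map, zipWith and reduce. The Network and SubModule classes are hypothetical stand-ins written just for this example.

# Illustrative sketch, not part of the patch; assumes a minitorch checkout with the
# changes above applied. Network and SubModule are made-up classes for this example.
import minitorch
from minitorch import operators


class SubModule(minitorch.Module):
    def __init__(self) -> None:
        super().__init__()
        self.bias = minitorch.Parameter(0.0)


class Network(minitorch.Module):
    def __init__(self) -> None:
        super().__init__()
        self.weight = minitorch.Parameter(1.0)
        self.layer = SubModule()


net = Network()
net.eval()  # eval() flips `training` here and on every descendant module
assert not net.training and not net.layer.training

# named_parameters() prefixes a descendant's parameters with the child module's name.
assert [name for name, _ in net.named_parameters()] == ["weight", "layer.bias"]

# The higher-order helpers compose the scalar operators over lists.
assert operators.negList([1.0, -2.0]) == [-1.0, 2.0]
assert operators.sum(operators.addLists([1.0, 2.0], [3.0, 4.0])) == 10.0
assert operators.prod([3.0, 4.0]) == 12.0
assert operators.is_close(operators.sigmoid(0.0), 0.5)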