Skip to content

Fix runtime error in Task 0.5 due to numpy version #40

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 2 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
27 changes: 23 additions & 4 deletions minitorch/module.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,12 +32,18 @@ def modules(self) -> Sequence[Module]:
def train(self) -> None:
    """Set the mode of this module and all descendant modules to `train`.

    Sets `self.training = True` on this module, then recurses into every
    child module registered in the `_modules` dict.
    """
    self.training = True
    # Recurse into every registered child so the whole subtree flips mode.
    for module in self.__dict__["_modules"].values():
        module.train()

def eval(self) -> None:
    """Set the mode of this module and all descendant modules to `eval`.

    Sets `self.training = False` on this module, then recurses into every
    child module registered in the `_modules` dict.
    """
    self.training = False
    # Recurse into every registered child so the whole subtree flips mode.
    for module in self.__dict__["_modules"].values():
        module.eval()

def named_parameters(self) -> Sequence[Tuple[str, Parameter]]:
    """Collect all the parameters of this module and its descendants.

    Returns:
        A list of `(name, Parameter)` pairs. This module's own parameters
        keep their plain names; parameters of child modules are prefixed
        with the child's name, joined by ".", for each nesting level.
    """
    named = list(self.__dict__["_parameters"].items())
    for module_name, module in self.__dict__["_modules"].items():
        named.extend(
            (".".join((module_name, name)), parameter)
            for name, parameter in module.named_parameters()
        )
    return named

def parameters(self) -> Sequence[Parameter]:
    """Enumerate over all the parameters of this module and its descendants.

    Returns:
        A flat list of `Parameter` objects: this module's own parameters
        first, followed by those of each child module, recursively.
    """
    params = list(self.__dict__["_parameters"].values())
    for module in self.__dict__["_modules"].values():
        params.extend(module.parameters())
    return params

def add_parameter(self, k: str, v: Any) -> Parameter:
"""Manually add a parameter. Useful helper for scalar parameters.
Expand Down
193 changes: 191 additions & 2 deletions minitorch/operators.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,11 @@
"""Collection of the core mathematical operators used throughout the code base."""
"""
Collection of the core mathematical operators used throughout the code base.
"""

import math

# ## Task 0.1
from typing import Callable, Iterable
from typing import Callable, Iterable, Optional

#
# Implementation of a prelude of elementary functions.
Expand Down Expand Up @@ -33,6 +35,159 @@


# TODO: Implement for Task 0.1.
def mul(x: float, y: float) -> float:
    """Multiply two scalars.

    Args:
        x: A float value.
        y: A float value.

    Returns:
        The product of x and y.
    """
    product = x * y
    return product


def id(x: float) -> float:
    """Identity: return the input unchanged.

    Args:
        x: A float value.

    Returns:
        x itself, untouched.
    """
    return x


def add(x: float, y: float) -> float:
    """Add two scalars.

    Args:
        x: A float value.
        y: A float value.

    Returns:
        The sum of x and y.
    """
    total = x + y
    return total


def neg(x: float) -> float:
    """Negate a scalar.

    Args:
        x: A float value.

    Returns:
        x with its sign flipped (equivalent to multiplying by -1.0).
    """
    return -1.0 * x


def lt(x: float, y: float) -> bool:
    """Less-than comparison of two scalars.

    Args:
        x: A float value.
        y: A float value.

    Returns:
        True exactly when x is strictly less than y.
    """
    # y > x is the mirrored form of x < y.
    return y > x


def eq(x: float, y: float) -> bool:
    """Equality comparison of two scalars.

    Args:
        x: A float value.
        y: A float value.

    Returns:
        True exactly when x equals y.
    """
    result = x == y
    return result


def max(x: float, y: float) -> float:
    """Return the larger of two scalars.

    Args:
        x: A float value.
        y: A float value.

    Returns:
        x when x > y, otherwise y (so ties return y).
    """
    if x > y:
        return x
    return y


def is_close(x: float, y: float, atol: float = 1e-8, rtol: float = 1e-5) -> bool:
    """Check whether two scalars are approximately equal.

    Uses the same criterion as ``torch.isclose``:
    ``|x - y| <= atol + rtol * |y|``.

    Note: the tolerances are plain floats, not Optional — passing ``None``
    would crash the arithmetic, so the annotations say ``float``.

    Args:
        x: A float value.
        y: A float value.
        atol: Absolute tolerance. Default: 1e-8.
        rtol: Relative tolerance (scaled by |y|). Default: 1e-5.

    Returns:
        True if x is close to y under the given tolerances.
    """
    return math.fabs(x - y) <= (atol + rtol * math.fabs(y))


def sigmoid(x: float) -> float:
    """Sigmoid function, computed in a numerically stable form.

    For x >= 0 uses 1 / (1 + e^-x); for x < 0 uses the equivalent
    e^x / (1 + e^x). Both branches only ever pass a non-positive
    argument to math.exp, so it cannot overflow for very negative x
    (the naive 1 / (1 + math.exp(-x)) raises OverflowError there).

    Args:
        x: A float value.

    Returns:
        A float value 1 / (1 + e^-x), always in [0.0, 1.0].
    """
    if x >= 0.0:
        return 1.0 / (1.0 + math.exp(-x))
    ex = math.exp(x)
    return ex / (1.0 + ex)


def relu(x: float) -> float:
    """Rectified linear unit.

    Args:
        x: A float value.

    Returns:
        0.0 if x <= 0.0, otherwise x unchanged.
    """
    return 0.0 if x <= 0.0 else x


def log(x: float) -> float:
    """Natural logarithm.

    Args:
        x: A float value. Must be positive (math.log raises ValueError
            for x <= 0).

    Returns:
        ln(x).
    """
    return math.log(x)


def exp(x: float) -> float:
    """Exponential function.

    Args:
        x: A float value.

    Returns:
        e raised to the power x.
    """
    return math.exp(x)


def inv(x: float) -> float:
    """Reciprocal function.

    Args:
        x: A float value. Raises ZeroDivisionError when x == 0.

    Returns:
        1 / x.
    """
    return 1.0 / x


def log_back(x: float, y: float) -> float:
    """Derivative of log scaled by a second argument.

    d/dx log(x) = 1/x, so the chained gradient is y / x.

    Args:
        x: The input at which log was evaluated.
        y: The upstream gradient to multiply by.

    Returns:
        y / x.
    """
    return y / x


def inv_back(x: float, y: float) -> float:
    """Derivative of inv scaled by a second argument.

    d/dx (1/x) = -1/x^2, so the chained gradient is -y / x^2.

    Args:
        x: The input at which inv was evaluated.
        y: The upstream gradient to multiply by.

    Returns:
        -y / x^2.
    """
    return -y / x**2


def relu_back(x: float, y: float) -> float:
    """Derivative of relu scaled by a second argument.

    relu is flat (gradient 0) for x <= 0 and the identity (gradient 1)
    for x > 0, so the chained gradient is 0 or y accordingly.

    Args:
        x: The input at which relu was evaluated.
        y: The upstream gradient to multiply by.

    Returns:
        0.0 if x <= 0, otherwise y.
    """
    return 0.0 if x <= 0 else y


# ## Task 0.3
Expand All @@ -52,3 +207,37 @@


# TODO: Implement for Task 0.3.
def map(func: Callable[[float], float], xs: Iterable[float]) -> Iterable[float]:
    """Higher-order map.

    Args:
        func: Function applied to each element.
        xs: Iterable of floats.

    Returns:
        A list containing func applied to each element of xs, in order.
    """
    return [func(x) for x in xs]


def zipWith(
    func: Callable[[float, float], float], xs: Iterable[float], ys: Iterable[float]
) -> Iterable[float]:
    """Higher-order zipWith: combine two iterables element-wise.

    Args:
        func: Binary function applied to paired elements.
        xs: First iterable of floats.
        ys: Second iterable of floats.

    Returns:
        A list of func(x, y) for paired elements; truncated to the
        shorter of the two inputs (zip semantics).
    """
    return [func(x, y) for x, y in zip(xs, ys)]


def reduce(func: Callable[[float, float], float], xs: Iterable[float]) -> float:
    """Higher-order reduce: fold an iterable into a single value.

    The first element seeds the accumulator; func is then applied
    left-to-right over the rest. Works on any iterable, including
    one-shot generators — the original version used len() and indexing,
    which fail for non-sequence iterables despite the Iterable type hint.

    Args:
        func: Binary function combining the accumulator with each element.
        xs: Iterable of floats.

    Returns:
        The folded value, or 0.0 for an empty iterable (preserving the
        original empty-input behavior).
    """
    it = iter(xs)
    try:
        acc = next(it)
    except StopIteration:
        # Empty input: keep the original convention of returning 0.0.
        return 0.0
    for x in it:
        acc = func(acc, x)
    return acc


def negList(xs: Iterable[float]) -> Iterable[float]:
    """Negate each element of a list, using map.

    Args:
        xs: Iterable of floats.

    Returns:
        A list with every element of xs negated.
    """
    return map(neg, xs)


def addLists(xs: Iterable[float], ys: Iterable[float]) -> Iterable[float]:
    """Add two lists element-wise, using zipWith.

    Args:
        xs: First iterable of floats.
        ys: Second iterable of floats.

    Returns:
        A list of element-wise sums (truncated to the shorter input).
    """
    return zipWith(add, xs, ys)


def sum(xs: Iterable[float]) -> float:
    """Sum all elements of a list, using reduce.

    Args:
        xs: Iterable of floats.

    Returns:
        The sum of the elements; 0.0 for an empty iterable (reduce's
        empty-input default).
    """
    return reduce(add, xs)


def prod(xs: Iterable[float]) -> float:
    """Multiply all elements of a list, using reduce.

    NOTE(review): reduce returns 0.0 for an empty iterable, so
    prod([]) == 0.0 rather than the mathematical convention of 1.0 —
    confirm this is intended.

    Args:
        xs: Iterable of floats.

    Returns:
        The product of the elements.
    """
    return reduce(mul, xs)
2 changes: 1 addition & 1 deletion requirements.txt
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
colorama==0.4.3
hypothesis == 6.54
numba == 0.60
numpy == 2.0.0
numpy<2
pre-commit == 2.20.0
pytest == 8.3.2
pytest-env
Expand Down
29 changes: 19 additions & 10 deletions tests/test_operators.py
Original file line number Diff line number Diff line change
Expand Up @@ -105,43 +105,49 @@ def test_sigmoid(a: float) -> None:
* It is always between 0.0 and 1.0.
* one minus sigmoid is the same as sigmoid of the negative
* It crosses 0 at 0.5
* It is strictly increasing.
* It is strictly increasing.
"""
# TODO: Implement for Task 0.2.
raise NotImplementedError("Need to implement for Task 0.2")
assert 0.0 <= sigmoid(a) and sigmoid(a) <= 1.0
assert_close(1 - sigmoid(a), sigmoid(-a))
assert_close(sigmoid(0.0), 0.5)


@pytest.mark.task0_2
@given(small_floats, small_floats, small_floats)
def test_transitive(a: float, b: float, c: float) -> None:
    """Test the transitive property of less-than (a < b and b < c implies a < c)."""
    # Only assert the conclusion when the hypothesis-generated premise holds.
    if lt(a, b) and lt(b, c):
        assert lt(a, c)


@pytest.mark.task0_2
@given(small_floats, small_floats)
def test_symmetric(a: float, b: float) -> None:
    """Ensure that :func:`minitorch.operators.mul` is symmetric, i.e.
    gives the same value regardless of the order of its input.
    """
    assert_close(mul(a, b), mul(b, a))


@pytest.mark.task0_2
@given(small_floats, small_floats, small_floats)
def test_distribute(a: float, b: float, c: float) -> None:
    r"""Ensure that the operators distribute, i.e.
    :math:`z \times (x + y) = z \times x + z \times y`
    """
    assert_close(mul(c, add(a, b)), add(mul(c, a), mul(c, b)))


@pytest.mark.task0_2
@given(small_floats)
def test_other(a: float) -> None:
    """Ensure another property holds: `id` returns its argument unchanged."""
    assert id(a) == a


# ## Task 0.3 - Higher-order functions
Expand Down Expand Up @@ -169,7 +175,10 @@ def test_sum_distribute(ls1: List[float], ls2: List[float]) -> None:
is the same as the sum of each element of `ls1` plus each element of `ls2`.
"""
# TODO: Implement for Task 0.3.
raise NotImplementedError("Need to implement for Task 0.3")
assert_close(
minitorch.operators.sum(addLists(ls1, ls2)),
add(minitorch.operators.sum(ls1), minitorch.operators.sum(ls2)),
)


@pytest.mark.task0_3
Expand Down