diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0cc8ec1d..d8ea226a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -28,35 +28,17 @@ repos: - id: requirements-txt-fixer - id: trailing-whitespace -- repo: https://github.com/timothycrosley/isort - rev: 5.10.1 +- repo: https://github.com/astral-sh/ruff-pre-commit + # Ruff version. + rev: v0.6.1 hooks: - - id: isort + # Run the linter. + - id: ruff + args: [ --fix ] + # Run the formatter. + - id: ruff-format -- repo: https://github.com/pre-commit/mirrors-mypy - rev: v0.971 +- repo: https://github.com/RobertCraigie/pyright-python + rev: v1.1.376 hooks: - - id: mypy - exclude: ^(docs/)|(project/)|(assignments/) - - -# Black, the code formatter, natively supports pre-commit -- repo: https://github.com/psf/black - rev: 22.6.0 - hooks: - - id: black - -# Flake8 also supports pre-commit natively (same author) -- repo: https://github.com/PyCQA/flake8 - rev: 5.0.4 - hooks: - - id: flake8 - additional_dependencies: - - pep8-naming - exclude: ^(docs/)|(assignments/) - -# Doc linters -- repo: https://github.com/terrencepreilly/darglint - rev: v1.8.1 - hooks: - - id: darglint + - id: pyright diff --git a/README.md b/README.md index 46933775..bc41275d 100644 --- a/README.md +++ b/README.md @@ -14,4 +14,4 @@ python sync_previous_module.py previous-module-dir current-module-dir The files that will be synced are: - minitorch/operators.py minitorch/module.py tests/test_module.py tests/test_operators.py project/run_manual.py + minitorch/operators.py minitorch/module.py tests/test_module.py tests/test_operators.py project/run_manual.py \ No newline at end of file diff --git a/minitorch/__init__.py b/minitorch/__init__.py index 6f138f32..c78a438b 100644 --- a/minitorch/__init__.py +++ b/minitorch/__init__.py @@ -1,8 +1,8 @@ +from .testing import MathTest, MathTestVariable # type: ignore # noqa: F401,F403 from .autodiff import * # noqa: F401,F403 -from .datasets import * # noqa: F401,F403 -from .module import * # noqa: F401,F403 -from .optim import * # noqa: F401,F403 from .scalar import * # noqa: F401,F403 from .scalar_functions import * # noqa: F401,F403 +from .optim import * # noqa: F401,F403 +from .datasets import * # noqa: F401,F403 from .testing import * # noqa: F401,F403 -from .testing import MathTest, MathTestVariable # type: ignore # noqa: F401,F403 +from .module import * # noqa: F401,F403 diff --git a/minitorch/autodiff.py b/minitorch/autodiff.py index 2b69873b..9ed02845 100644 --- a/minitorch/autodiff.py +++ b/minitorch/autodiff.py @@ -1,26 +1,29 @@ +from __future__ import annotations + from dataclasses import dataclass -from typing import Any, Iterable, List, Tuple +from typing import Any, Iterable, List, Tuple, Protocol -from typing_extensions import Protocol # ## Task 1.1 # Central Difference calculation def central_difference(f: Any, *vals: Any, arg: int = 0, epsilon: float = 1e-6) -> Any: - r""" - Computes an approximation to the derivative of `f` with respect to one arg. + r"""Computes an approximation to the derivative of `f` with respect to one arg. See :doc:`derivative` or https://en.wikipedia.org/wiki/Finite_difference for more details. Args: + ---- f : arbitrary function from n-scalar args to one value *vals : n-float values $x_0 \ldots x_{n-1}$ arg : the number $i$ of the arg to compute the derivative epsilon : a small constant Returns: + ------- An approximation of $f'_i(x_0, \ldots, x_{n-1})$ + """ # TODO: Implement for Task 1.1. 
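    # A minimal central-difference sketch, assuming `vals` can be copied into a
    # list of floats (illustrative only, not necessarily the graded solution):
    #     up = list(vals); up[arg] += epsilon
    #     down = list(vals); down[arg] -= epsilon
    #     return (f(*up) - f(*down)) / (2.0 * epsilon)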
raise NotImplementedError("Need to implement for Task 1.1") @@ -30,51 +33,48 @@ def central_difference(f: Any, *vals: Any, arg: int = 0, epsilon: float = 1e-6) class Variable(Protocol): - def accumulate_derivative(self, x: Any) -> None: - pass + def accumulate_derivative(self, x: Any) -> None: ... @property - def unique_id(self) -> int: - pass + def unique_id(self) -> int: ... - def is_leaf(self) -> bool: - pass + def is_leaf(self) -> bool: ... - def is_constant(self) -> bool: - pass + def is_constant(self) -> bool: ... @property - def parents(self) -> Iterable["Variable"]: - pass + def parents(self) -> Iterable["Variable"]: ... - def chain_rule(self, d_output: Any) -> Iterable[Tuple["Variable", Any]]: - pass + def chain_rule(self, d_output: Any) -> Iterable[Tuple[Variable, Any]]: ... def topological_sort(variable: Variable) -> Iterable[Variable]: - """ - Computes the topological order of the computation graph. + """Computes the topological order of the computation graph. Args: + ---- variable: The right-most variable Returns: + ------- Non-constant Variables in topological order starting from the right. + """ # TODO: Implement for Task 1.4. raise NotImplementedError("Need to implement for Task 1.4") def backpropagate(variable: Variable, deriv: Any) -> None: - """ - Runs backpropagation on the computation graph in order to + """Runs backpropagation on the computation graph in order to compute derivatives for the leave nodes. Args: + ---- variable: The right-most variable deriv : Its derivative that we want to propagate backward to the leaves. No return. Should write to its results to the derivative values of each leaf through `accumulate_derivative`. + """ # TODO: Implement for Task 1.4. raise NotImplementedError("Need to implement for Task 1.4") @@ -82,15 +82,13 @@ def backpropagate(variable: Variable, deriv: Any) -> None: @dataclass class Context: - """ - Context class is used by `Function` to store information during the forward pass. - """ + """Context class is used by `Function` to store information during the forward pass.""" no_grad: bool = False saved_values: Tuple[Any, ...] = () def save_for_backward(self, *values: Any) -> None: - "Store the given `values` if they need to be used during backpropagation." + """Store the given `values` if they need to be used during backpropagation.""" if self.no_grad: return self.saved_values = values diff --git a/minitorch/module.py b/minitorch/module.py index 11fc1f39..4782cb4f 100644 --- a/minitorch/module.py +++ b/minitorch/module.py @@ -4,11 +4,11 @@ class Module: - """ - Modules form a tree that store parameters and other + """Modules form a tree that store parameters and other submodules. They make up the basis of neural network stacks. - Attributes: + Attributes + ---------- _modules : Storage of the child modules _parameters : Storage of the module's parameters training : Whether the module is in training mode or evaluation mode @@ -25,42 +25,44 @@ def __init__(self) -> None: self.training = True def modules(self) -> Sequence[Module]: - "Return the direct child modules of this module." + """Return the direct child modules of this module.""" m: Dict[str, Module] = self.__dict__["_modules"] return list(m.values()) def train(self) -> None: - "Set the mode of this module and all descendent modules to `train`." 
+ """Set the mode of this module and all descendent modules to `train`.""" raise NotImplementedError("Need to include this file from past assignment.") def eval(self) -> None: - "Set the mode of this module and all descendent modules to `eval`." + """Set the mode of this module and all descendent modules to `eval`.""" raise NotImplementedError("Need to include this file from past assignment.") def named_parameters(self) -> Sequence[Tuple[str, Parameter]]: - """ - Collect all the parameters of this module and its descendents. + """Collect all the parameters of this module and its descendents. - - Returns: + Returns + ------- The name and `Parameter` of each ancestor parameter. + """ raise NotImplementedError("Need to include this file from past assignment.") def parameters(self) -> Sequence[Parameter]: - "Enumerate over all the parameters of this module and its descendents." + """Enumerate over all the parameters of this module and its descendents.""" raise NotImplementedError("Need to include this file from past assignment.") def add_parameter(self, k: str, v: Any) -> Parameter: - """ - Manually add a parameter. Useful helper for scalar parameters. + """Manually add a parameter. Useful helper for scalar parameters. Args: + ---- k: Local name of the parameter. v: Value for the parameter. Returns: + ------- Newly created parameter. + """ val = Parameter(v, k) self.__dict__["_parameters"][k] = val @@ -114,8 +116,7 @@ def _addindent(s_: str, numSpaces: int) -> str: class Parameter: - """ - A Parameter is a special container stored in a `Module`. + """A Parameter is a special container stored in a `Module`. It is designed to hold a `Variable`, but we allow it to hold any value for testing. @@ -130,7 +131,7 @@ def __init__(self, x: Any, name: Optional[str] = None) -> None: self.value.name = self.name def update(self, x: Any) -> None: - "Update the parameter value." + """Update the parameter value.""" self.value = x if hasattr(x, "requires_grad_"): self.value.requires_grad_(True) diff --git a/minitorch/operators.py b/minitorch/operators.py index 895ae82d..0be83e33 100644 --- a/minitorch/operators.py +++ b/minitorch/operators.py @@ -1,185 +1,52 @@ -""" -Collection of the core mathematical operators used throughout the code base. -""" +"""Collection of the core mathematical operators used throughout the code base.""" import math -from typing import Callable, Iterable # ## Task 0.1 +from typing import Callable, Iterable + # # Implementation of a prelude of elementary functions. 
- -def mul(x: float, y: float) -> float: - "$f(x, y) = x * y$" - raise NotImplementedError("Need to include this file from past assignment.") - - -def id(x: float) -> float: - "$f(x) = x$" - raise NotImplementedError("Need to include this file from past assignment.") - - -def add(x: float, y: float) -> float: - "$f(x, y) = x + y$" - raise NotImplementedError("Need to include this file from past assignment.") - - -def neg(x: float) -> float: - "$f(x) = -x$" - raise NotImplementedError("Need to include this file from past assignment.") - - -def lt(x: float, y: float) -> float: - "$f(x) =$ 1.0 if x is less than y else 0.0" - raise NotImplementedError("Need to include this file from past assignment.") - - -def eq(x: float, y: float) -> float: - "$f(x) =$ 1.0 if x is equal to y else 0.0" - raise NotImplementedError("Need to include this file from past assignment.") - - -def max(x: float, y: float) -> float: - "$f(x) =$ x if x is greater than y else y" - raise NotImplementedError("Need to include this file from past assignment.") - - -def is_close(x: float, y: float) -> float: - "$f(x) = |x - y| < 1e-2$" - raise NotImplementedError("Need to include this file from past assignment.") - - -def sigmoid(x: float) -> float: - r""" - $f(x) = \frac{1.0}{(1.0 + e^{-x})}$ - - (See https://en.wikipedia.org/wiki/Sigmoid_function ) - - Calculate as - - $f(x) = \frac{1.0}{(1.0 + e^{-x})}$ if x >=0 else $\frac{e^x}{(1.0 + e^{x})}$ - - for stability. - """ - raise NotImplementedError("Need to include this file from past assignment.") - - -def relu(x: float) -> float: - """ - $f(x) =$ x if x is greater than 0, else 0 - - (See https://en.wikipedia.org/wiki/Rectifier_(neural_networks) .) - """ - raise NotImplementedError("Need to include this file from past assignment.") - - -EPS = 1e-6 - - -def log(x: float) -> float: - "$f(x) = log(x)$" - return math.log(x + EPS) - - -def exp(x: float) -> float: - "$f(x) = e^{x}$" - return math.exp(x) - - -def log_back(x: float, d: float) -> float: - r"If $f = log$ as above, compute $d \times f'(x)$" - raise NotImplementedError("Need to include this file from past assignment.") - - -def inv(x: float) -> float: - "$f(x) = 1/x$" - raise NotImplementedError("Need to include this file from past assignment.") - - -def inv_back(x: float, d: float) -> float: - r"If $f(x) = 1/x$ compute $d \times f'(x)$" - raise NotImplementedError("Need to include this file from past assignment.") +# Mathematical functions: +# - mul +# - id +# - add +# - neg +# - lt +# - eq +# - max +# - is_close +# - sigmoid +# - relu +# - log +# - exp +# - log_back +# - inv +# - inv_back +# - relu_back +# +# For sigmoid calculate as: +# $f(x) = \frac{1.0}{(1.0 + e^{-x})}$ if x >=0 else $\frac{e^x}{(1.0 + e^{x})}$ +# For is_close: +# $f(x) = |x - y| < 1e-2$ -def relu_back(x: float, d: float) -> float: - r"If $f = relu$ compute $d \times f'(x)$" - raise NotImplementedError("Need to include this file from past assignment.") # ## Task 0.3 # Small practice library of elementary higher-order functions. - -def map(fn: Callable[[float], float]) -> Callable[[Iterable[float]], Iterable[float]]: - """ - Higher-order map. - - See https://en.wikipedia.org/wiki/Map_(higher-order_function) - - Args: - fn: Function from one value to one value. 
- - Returns: - A function that takes a list, applies `fn` to each element, and returns a - new list - """ - raise NotImplementedError("Need to include this file from past assignment.") - - -def negList(ls: Iterable[float]) -> Iterable[float]: - "Use `map` and `neg` to negate each element in `ls`" - raise NotImplementedError("Need to include this file from past assignment.") - - -def zipWith( - fn: Callable[[float, float], float] -) -> Callable[[Iterable[float], Iterable[float]], Iterable[float]]: - """ - Higher-order zipwith (or map2). - - See https://en.wikipedia.org/wiki/Map_(higher-order_function) - - Args: - fn: combine two values - - Returns: - Function that takes two equally sized lists `ls1` and `ls2`, produce a new list by - applying fn(x, y) on each pair of elements. - - """ - raise NotImplementedError("Need to include this file from past assignment.") - - -def addLists(ls1: Iterable[float], ls2: Iterable[float]) -> Iterable[float]: - "Add the elements of `ls1` and `ls2` using `zipWith` and `add`" - raise NotImplementedError("Need to include this file from past assignment.") - - -def reduce( - fn: Callable[[float, float], float], start: float -) -> Callable[[Iterable[float]], float]: - r""" - Higher-order reduce. - - Args: - fn: combine two values - start: start value $x_0$ - - Returns: - Function that takes a list `ls` of elements - $x_1 \ldots x_n$ and computes the reduction :math:`fn(x_3, fn(x_2, - fn(x_1, x_0)))` - """ - raise NotImplementedError("Need to include this file from past assignment.") - - -def sum(ls: Iterable[float]) -> float: - "Sum up a list using `reduce` and `add`." - raise NotImplementedError("Need to include this file from past assignment.") +# Implement the following core functions +# - map +# - zipWith +# - reduce +# +# Use these to implement +# - negList : negate a list +# - addLists : add two lists together +# - sum: sum lists +# - prod: take the product of lists -def prod(ls: Iterable[float]) -> float: - "Product of a list using `reduce` and `mul`." - raise NotImplementedError("Need to include this file from past assignment.") diff --git a/minitorch/scalar.py b/minitorch/scalar.py index f5abbe9e..b363599c 100644 --- a/minitorch/scalar.py +++ b/minitorch/scalar.py @@ -5,6 +5,7 @@ import numpy as np +from dataclasses import field from .autodiff import Context, Variable, backpropagate, central_difference from .scalar_functions import ( EQ, @@ -25,11 +26,11 @@ @dataclass class ScalarHistory: - """ - `ScalarHistory` stores the history of `Function` operations that was + """`ScalarHistory` stores the history of `Function` operations that was used to construct the current Variable. - Attributes: + Attributes + ---------- last_fn : The last Function that was called. ctx : The context for that Function. inputs : The inputs that were given when `last_fn.forward` was called. @@ -47,40 +48,30 @@ class ScalarHistory: _var_count = 0 +@dataclass class Scalar: - """ - A reimplementation of scalar values for autodifferentiation + """A reimplementation of scalar values for autodifferentiation tracking. Scalar Variables behave as close as possible to standard Python numbers while also tracking the operations that led to the number's creation. They can only be manipulated by `ScalarFunction`. 
""" - history: Optional[ScalarHistory] - derivative: Optional[float] data: float - unique_id: int - name: str - - def __init__( - self, - v: float, - back: ScalarHistory = ScalarHistory(), - name: Optional[str] = None, - ): + history: Optional[ScalarHistory] = field(default_factory=ScalarHistory) + derivative: Optional[float] = None + name: str = field(default="") + unique_id: int = field(default=0) + + def __post_init__(self): global _var_count _var_count += 1 - self.unique_id = _var_count - self.data = float(v) - self.history = back - self.derivative = None - if name is not None: - self.name = name - else: - self.name = str(self.unique_id) + object.__setattr__(self, "unique_id", _var_count) + object.__setattr__(self, "name", str(self.unique_id)) + object.__setattr__(self, "data", float(self.data)) def __repr__(self) -> str: - return "Scalar(%f)" % self.data + return f"Scalar({self.data})" def __mul__(self, b: ScalarLike) -> Scalar: return Mul.apply(self, b) @@ -91,72 +82,33 @@ def __truediv__(self, b: ScalarLike) -> Scalar: def __rtruediv__(self, b: ScalarLike) -> Scalar: return Mul.apply(b, Inv.apply(self)) - def __add__(self, b: ScalarLike) -> Scalar: - # TODO: Implement for Task 1.2. - raise NotImplementedError("Need to implement for Task 1.2") - def __bool__(self) -> bool: return bool(self.data) - def __lt__(self, b: ScalarLike) -> Scalar: - # TODO: Implement for Task 1.2. - raise NotImplementedError("Need to implement for Task 1.2") - - def __gt__(self, b: ScalarLike) -> Scalar: - # TODO: Implement for Task 1.2. - raise NotImplementedError("Need to implement for Task 1.2") - - def __eq__(self, b: ScalarLike) -> Scalar: # type: ignore[override] - # TODO: Implement for Task 1.2. - raise NotImplementedError("Need to implement for Task 1.2") - - def __sub__(self, b: ScalarLike) -> Scalar: - # TODO: Implement for Task 1.2. - raise NotImplementedError("Need to implement for Task 1.2") - - def __neg__(self) -> Scalar: - # TODO: Implement for Task 1.2. - raise NotImplementedError("Need to implement for Task 1.2") - def __radd__(self, b: ScalarLike) -> Scalar: return self + b def __rmul__(self, b: ScalarLike) -> Scalar: return self * b - def log(self) -> Scalar: - # TODO: Implement for Task 1.2. - raise NotImplementedError("Need to implement for Task 1.2") - - def exp(self) -> Scalar: - # TODO: Implement for Task 1.2. - raise NotImplementedError("Need to implement for Task 1.2") - - def sigmoid(self) -> Scalar: - # TODO: Implement for Task 1.2. - raise NotImplementedError("Need to implement for Task 1.2") - - def relu(self) -> Scalar: - # TODO: Implement for Task 1.2. - raise NotImplementedError("Need to implement for Task 1.2") - # Variable elements for backprop def accumulate_derivative(self, x: Any) -> None: - """ - Add `val` to the the derivative accumulated on this variable. + """Add `val` to the the derivative accumulated on this variable. Should only be called during autodifferentiation on leaf variables. Args: + ---- x: value to be accumulated + """ assert self.is_leaf(), "Only leaf variables can have derivatives." 
if self.derivative is None: - self.derivative = 0.0 - self.derivative += x + self.__setattr__("derivative", 0.0) + self.__setattr__("derivative", self.derivative + x) def is_leaf(self) -> bool: - "True if this variable created by the user (no `last_fn`)" + """True if this variable created by the user (no `last_fn`)""" return self.history is not None and self.history.last_fn is None def is_constant(self) -> bool: @@ -177,26 +129,31 @@ def chain_rule(self, d_output: Any) -> Iterable[Tuple[Variable, Any]]: raise NotImplementedError("Need to implement for Task 1.3") def backward(self, d_output: Optional[float] = None) -> None: - """ - Calls autodiff to fill in the derivatives for the history of this object. + """Calls autodiff to fill in the derivatives for the history of this object. Args: + ---- d_output (number, opt): starting derivative to backpropagate through the model (typically left out, and assumed to be 1.0). + """ if d_output is None: d_output = 1.0 backpropagate(self, d_output) + # TODO: Implement for Task 1.2. + raise NotImplementedError("Need to implement for Task 1.2") + def derivative_check(f: Any, *scalars: Scalar) -> None: - """ - Checks that autodiff works on a python function. + """Checks that autodiff works on a python function. Asserts False if derivative is incorrect. - Parameters: + Parameters + ---------- f : function from n-scalars to 1-scalar. *scalars : n input scalar values. + """ out = f(*scalars) out.backward() diff --git a/minitorch/scalar_functions.py b/minitorch/scalar_functions.py index d8d2307b..2eba3039 100644 --- a/minitorch/scalar_functions.py +++ b/minitorch/scalar_functions.py @@ -13,23 +13,15 @@ from .scalar import Scalar, ScalarLike -def wrap_tuple(x): # type: ignore - "Turn a possible value into a tuple" +def wrap_tuple(x: float | Tuple[float, ...]) -> Tuple[float, ...]: + """Turn a possible value into a tuple""" if isinstance(x, tuple): return x return (x,) -def unwrap_tuple(x): # type: ignore - "Turn a singleton tuple into a value" - if len(x) == 1: - return x[0] - return x - - class ScalarFunction: - """ - A wrapper for a mathematical function that processes and produces + """A wrapper for a mathematical function that processes and produces Scalar variables. This is a static class and is never instantiated. We use `class` @@ -45,7 +37,7 @@ def _forward(cls, ctx: Context, *inps: float) -> float: return cls.forward(ctx, *inps) # type: ignore @classmethod - def apply(cls, *vals: "ScalarLike") -> Scalar: + def apply(cls, *vals: ScalarLike) -> Scalar: raw_vals = [] scalars = [] for v in vals: @@ -70,7 +62,7 @@ def apply(cls, *vals: "ScalarLike") -> Scalar: # Examples class Add(ScalarFunction): - "Addition function $f(x, y) = x + y$" + """Addition function $f(x, y) = x + y$""" @staticmethod def forward(ctx: Context, a: float, b: float) -> float: @@ -82,7 +74,7 @@ def backward(ctx: Context, d_output: float) -> Tuple[float, ...]: class Log(ScalarFunction): - "Log function $f(x) = log(x)$" + """Log function $f(x) = log(x)$""" @staticmethod def forward(ctx: Context, a: float) -> float: @@ -98,113 +90,4 @@ def backward(ctx: Context, d_output: float) -> float: # To implement. -class Mul(ScalarFunction): - "Multiplication function" - - @staticmethod - def forward(ctx: Context, a: float, b: float) -> float: - # TODO: Implement for Task 1.2. - raise NotImplementedError("Need to implement for Task 1.2") - - @staticmethod - def backward(ctx: Context, d_output: float) -> Tuple[float, float]: - # TODO: Implement for Task 1.4. 
- raise NotImplementedError("Need to implement for Task 1.4") - - -class Inv(ScalarFunction): - "Inverse function" - - @staticmethod - def forward(ctx: Context, a: float) -> float: - # TODO: Implement for Task 1.2. - raise NotImplementedError("Need to implement for Task 1.2") - - @staticmethod - def backward(ctx: Context, d_output: float) -> float: - # TODO: Implement for Task 1.4. - raise NotImplementedError("Need to implement for Task 1.4") - - -class Neg(ScalarFunction): - "Negation function" - - @staticmethod - def forward(ctx: Context, a: float) -> float: - # TODO: Implement for Task 1.2. - raise NotImplementedError("Need to implement for Task 1.2") - - @staticmethod - def backward(ctx: Context, d_output: float) -> float: - # TODO: Implement for Task 1.4. - raise NotImplementedError("Need to implement for Task 1.4") - - -class Sigmoid(ScalarFunction): - "Sigmoid function" - - @staticmethod - def forward(ctx: Context, a: float) -> float: - # TODO: Implement for Task 1.2. - raise NotImplementedError("Need to implement for Task 1.2") - - @staticmethod - def backward(ctx: Context, d_output: float) -> float: - # TODO: Implement for Task 1.4. - raise NotImplementedError("Need to implement for Task 1.4") - - -class ReLU(ScalarFunction): - "ReLU function" - - @staticmethod - def forward(ctx: Context, a: float) -> float: - # TODO: Implement for Task 1.2. - raise NotImplementedError("Need to implement for Task 1.2") - - @staticmethod - def backward(ctx: Context, d_output: float) -> float: - # TODO: Implement for Task 1.4. - raise NotImplementedError("Need to implement for Task 1.4") - - -class Exp(ScalarFunction): - "Exp function" - - @staticmethod - def forward(ctx: Context, a: float) -> float: - # TODO: Implement for Task 1.2. - raise NotImplementedError("Need to implement for Task 1.2") - - @staticmethod - def backward(ctx: Context, d_output: float) -> float: - # TODO: Implement for Task 1.4. - raise NotImplementedError("Need to implement for Task 1.4") - - -class LT(ScalarFunction): - "Less-than function $f(x) =$ 1.0 if x is less than y else 0.0" - - @staticmethod - def forward(ctx: Context, a: float, b: float) -> float: - # TODO: Implement for Task 1.2. - raise NotImplementedError("Need to implement for Task 1.2") - - @staticmethod - def backward(ctx: Context, d_output: float) -> Tuple[float, float]: - # TODO: Implement for Task 1.4. - raise NotImplementedError("Need to implement for Task 1.4") - - -class EQ(ScalarFunction): - "Equal function $f(x) =$ 1.0 if x is equal to y else 0.0" - - @staticmethod - def forward(ctx: Context, a: float, b: float) -> float: - # TODO: Implement for Task 1.2. - raise NotImplementedError("Need to implement for Task 1.2") - - @staticmethod - def backward(ctx: Context, d_output: float) -> Tuple[float, float]: - # TODO: Implement for Task 1.4. - raise NotImplementedError("Need to implement for Task 1.4") +# TODO: Implement for Task 1.2. diff --git a/project/app.py b/project/app.py index 5ed4ec27..7251c682 100644 --- a/project/app.py +++ b/project/app.py @@ -19,9 +19,7 @@ st.sidebar.markdown( """

MiniTorch

{} -""".format( - get_img_tag("https://minitorch.github.io/_images/match.png", width="40") - ), +""".format(get_img_tag("https://minitorch.github.io/logo-sm.png", width="40")), unsafe_allow_html=True, ) diff --git a/project/graph_builder.py b/project/graph_builder.py index a037e4fc..221ad9c6 100644 --- a/project/graph_builder.py +++ b/project/graph_builder.py @@ -1,7 +1,15 @@ import networkx as nx - +from dataclasses import dataclass import minitorch +if hasattr(minitorch, "Scalar"): + Scalar = minitorch.Scalar # type: ignore +else: + + @dataclass + class Scalar: + name: str + def build_expression(code): out = eval( @@ -17,7 +25,6 @@ def build_expression(code): def build_tensor_expression(code): - variables = { "x": minitorch.tensor([[1.0, 2.0, 3.0]], requires_grad=True), "y": minitorch.tensor([[1.0, 2.0, 3.0]], requires_grad=True), @@ -39,7 +46,7 @@ def __init__(self): self.intermediates = {} def get_name(self, x): - if not isinstance(x, minitorch.Scalar) and not isinstance(x, minitorch.Tensor): + if not isinstance(x, Scalar) and not isinstance(x, minitorch.Tensor): return "constant %s" % (x,) elif len(x.name) > 15: if x.name in self.intermediates: @@ -72,7 +79,7 @@ def run(self, final): G.add_edge(self.get_name(input), op, f"{i}") for input in cur.history.inputs: - if not isinstance(input, minitorch.Scalar) and not isinstance( + if not isinstance(input, Scalar) and not isinstance( input, minitorch.Tensor ): continue diff --git a/project/interface/plots.py b/project/interface/plots.py index 83023064..92d88226 100644 --- a/project/interface/plots.py +++ b/project/interface/plots.py @@ -164,7 +164,6 @@ def plot_function(title, fn, arange=[(i / 10.0) - 5 for i in range(0, 100)], fn2 def plot_function3D(title, fn, arange=[(i / 5.0) - 4.0 for i in range(0, 40)]): - xs = [((x / 10.0) - 5.0 + 1e-5) for x in range(1, 100)] ys = [((x / 10.0) - 5.0 + 1e-5) for x in range(1, 100)] zs = [[fn(x, y) for x in xs] for y in ys] diff --git a/project/interface/streamlit_utils.py b/project/interface/streamlit_utils.py index cebf4ff0..d417318a 100644 --- a/project/interface/streamlit_utils.py +++ b/project/interface/streamlit_utils.py @@ -20,17 +20,13 @@ def get_img_tag(src, width=None): width: {}px; }} - """.format( - img_id, width - ) + """.format(img_id, width) else: style = "" return """ img-{} {} - """.format( - src, img_id, img_id, style - ) + """.format(src, img_id, img_id, style) def render_function(fn): diff --git a/project/math_interface.py b/project/math_interface.py index db799683..606ae9a6 100644 --- a/project/math_interface.py +++ b/project/math_interface.py @@ -65,7 +65,6 @@ def render_math_sandbox(use_scalar=False, use_tensor=False): st.graphviz_chart(nx.nx_pydot.to_pydot(G).to_string()) if f_type == "Two Arg": - st.write("### " + name) render_function(scalar) st.write("Function f(x, y)") diff --git a/project/run_manual.py b/project/run_manual.py index 302846fb..d14c7802 100644 --- a/project/run_manual.py +++ b/project/run_manual.py @@ -2,6 +2,7 @@ Be sure you have minitorch installed in you Virtual Env. >>> pip install -Ue . """ + import random import minitorch diff --git a/project/run_scalar.py b/project/run_scalar.py index 7ce5207b..3cc679b1 100644 --- a/project/run_scalar.py +++ b/project/run_scalar.py @@ -2,6 +2,7 @@ Be sure you have minitorch installed in you Virtual Env. >>> pip install -Ue . 
""" + import random import minitorch diff --git a/project/run_torch.py b/project/run_torch.py index 0049ada5..6017b647 100644 --- a/project/run_torch.py +++ b/project/run_torch.py @@ -56,7 +56,6 @@ def train( losses = [] for epoch in range(1, max_epochs + 1): - # Forward out = model.forward(torch.tensor(data.X, requires_grad=True)).view(data.N) y = torch.tensor(data.y) diff --git a/project/show_expression_interface.py b/project/show_expression_interface.py index 16a59caa..75729ba5 100644 --- a/project/show_expression_interface.py +++ b/project/show_expression_interface.py @@ -5,7 +5,6 @@ def render_show_expression(tensor=False): - if tensor: st.text("Build an expression of tensors x, y, and z. (All the same shape)") code = st_ace( diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..442ba844 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,151 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "minitorch" +version = "0.5" + +[tool.pyright] +include = ["**/minitorch"] +ignore = [ + "**/docs", + "**/docs/module1/**", + "**/assignments", + "**/project", + "**/mt_diagrams", + "**/.*", + "*chainrule.py*", +] +venvPath = "." +venv = ".venv" +reportUnknownMemberType = "none" +reportUnknownParameterType = "none" +reportUnknownArgumentType = "none" +reportUnknownVariableType = "none" +reportMissingTypeArgument = "none" +reportMissingTypeStubs = "none" +reportUnusedExpression = "none" +reportUnknownLambdaType = "none" +reportIncompatibleMethodOverride = "none" +reportPrivateUsage = "none" +reportMissingParameterType = "error" + + +[tool.pytest.ini_options] +markers = [ + "task0_0", + "task0_1", + "task0_2", + "task0_3", + "task0_4", + "task1_0", + "task1_1", + "task1_2", + "task1_3", + "task1_4", + "task2_0", + "task2_1", + "task2_2", + "task2_3", + "task2_4", + "task3_0", + "task3_1", + "task3_2", + "task3_3", + "task3_4", + "task4_0", + "task4_1", + "task4_2", + "task4_3", + "task4_4", +] +[tool.ruff] + +exclude = [ + ".git", + "__pycache__", + "**/docs/slides/*", + "old,build", + "dist", + "**/project/**/*", + "**/mt_diagrams/*", + "**/minitorch/testing.py", + "**/docs/**/*", +] + +ignore = [ + "ANN101", + "ANN401", + "N801", + "E203", + "E266", + "E501", + "E741", + "N803", + "N802", + "N806", + "D400", + "D401", + "D105", + "D415", + "D402", + "D205", + "D100", + "D101", + "D107", + "D213", + "ANN204", + "ANN102", +] +select = ["D", "E", "F", "N", "ANN"] +fixable = [ + "A", + "B", + "C", + "D", + "E", + "F", + "G", + "I", + "N", + "Q", + "S", + "T", + "W", + "ANN", + "ARG", + "BLE", + "COM", + "DJ", + "DTZ", + "EM", + "ERA", + "EXE", + "FBT", + "ICN", + "INP", + "ISC", + "NPY", + "PD", + "PGH", + "PIE", + "PL", + "PT", + "PTH", + "PYI", + "RET", + "RSE", + "RUF", + "SIM", + "SLF", + "TCH", + "TID", + "TRY", + "UP", + "YTT", +] +unfixable = [] + +[tool.ruff.extend-per-file-ignores] +"tests/**/*.py" = ["D"] diff --git a/requirements.extra.txt b/requirements.extra.txt index ab0402cd..070fa1d0 100644 --- a/requirements.extra.txt +++ b/requirements.extra.txt @@ -1,6 +1,5 @@ datasets==2.4.0 embeddings==0.0.8 -networkx==2.4 plotly==4.14.3 pydot==1.4.1 python-mnist @@ -8,4 +7,5 @@ streamlit==1.12.0 streamlit-ace torch watchdog==1.0.2 -altair<5 +altair==4.2.2 +networkx==3.3 diff --git a/requirements.txt b/requirements.txt index 40efede3..c9cd8a02 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,10 +1,9 @@ colorama==0.4.3 hypothesis == 6.54 -mypy == 0.971 -numba == 0.56 -numpy == 1.22 +numba == 0.60 +numpy == 2.0.0 
pre-commit == 2.20.0 -pytest == 7.1.2 +pytest == 8.3.2 pytest-env pytest-runner == 5.2 typing_extensions diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 8a5fbd84..00000000 --- a/setup.cfg +++ /dev/null @@ -1,61 +0,0 @@ -[metadata] -name=minitorch -version=0.4 - -[files] -packages = - minitorch - -[darglint] -ignore_regex=((^_(.*))|(.*map)|(.*zip)|(.*reduce)|(test.*)|(tensor_.*)) -docstring_style=google -strictness=long - -[flake8] -ignore = N801, E203, E266, E501, W503, F812, E741, N803, N802, N806 -exclude = .git,__pycache__,docs/*,old,build,dist - -[isort] -profile=black -src_paths=minitorch,test - -[mypy] -strict = True -ignore_missing_imports = True -exclude=^(docs/)|(project/)|(assignments/) -implicit_reexport = True - -[mypy-tests.*] -disallow_untyped_decorators = False -implicit_reexport = True - -[black] -exclude=^(docs/)|(project/)|(assignments/) - -[tool:pytest] -markers = - task0_0 - task0_1 - task0_2 - task0_3 - task0_4 - task1_0 - task1_1 - task1_2 - task1_3 - task1_4 - task2_0 - task2_1 - task2_2 - task2_3 - task2_4 - task3_0 - task3_1 - task3_2 - task3_3 - task3_4 - task4_0 - task4_1 - task4_2 - task4_3 - task4_4 diff --git a/tests/strategies.py b/tests/strategies.py index 3dcce7e7..41b07662 100644 --- a/tests/strategies.py +++ b/tests/strategies.py @@ -3,6 +3,7 @@ import minitorch + settings.register_profile("ci", deadline=None) settings.load_profile("ci") diff --git a/tests/test_autodiff.py b/tests/test_autodiff.py index fc40f00c..bf22db2d 100644 --- a/tests/test_autodiff.py +++ b/tests/test_autodiff.py @@ -13,25 +13,25 @@ class Function1(ScalarFunction): @staticmethod def forward(ctx: Context, x: float, y: float) -> float: - "$f(x, y) = x + y + 10$" + """$f(x, y) = x + y + 10$""" return x + y + 10 @staticmethod def backward(ctx: Context, d_output: float) -> Tuple[float, float]: - "Derivatives are $f'_x(x, y) = 1$ and $f'_y(x, y) = 1$" + """Derivatives are $f'_x(x, y) = 1$ and $f'_y(x, y) = 1$""" return d_output, d_output class Function2(ScalarFunction): @staticmethod def forward(ctx: Context, x: float, y: float) -> float: - "$f(x, y) = x \times y + x$" + """$f(x, y) = x \times y + x$""" ctx.save_for_backward(x, y) return x * y + x @staticmethod def backward(ctx: Context, d_output: float) -> Tuple[float, float]: - "Derivatives are $f'_x(x, y) = y + 1$ and $f'_y(x, y) = x$" + """Derivatives are $f'_x(x, y) = y + 1$ and $f'_y(x, y) = x$""" x, y = ctx.saved_values return d_output * (y + 1), d_output * x @@ -64,7 +64,7 @@ def test_chain_rule2() -> None: @pytest.mark.task1_3 def test_chain_rule3() -> None: - "Check that constrants are ignored and variables get derivatives." 
+ """Check that constrants are ignored and variables get derivatives.""" constant = 10 var = minitorch.Scalar(5) diff --git a/tests/test_module.py b/tests/test_module.py index db764afb..d12dbc81 100644 --- a/tests/test_module.py +++ b/tests/test_module.py @@ -44,7 +44,7 @@ def __init__(self) -> None: @pytest.mark.task0_4 def test_stacked_demo() -> None: - "Check that each of the properties match" + """Check that each of the properties match""" mod = ModuleA1() np = dict(mod.named_parameters()) @@ -95,7 +95,7 @@ def __init__(self) -> None: @pytest.mark.task0_4 @given(med_ints, med_ints) def test_module(size_a: int, size_b: int) -> None: - "Check the properties of a single module" + """Check the properties of a single module""" module = Module2() module.eval() assert not module.training @@ -116,7 +116,7 @@ def test_module(size_a: int, size_b: int) -> None: @pytest.mark.task0_4 @given(med_ints, med_ints, small_floats) def test_stacked_module(size_a: int, size_b: int, val: float) -> None: - "Check the properties of a stacked module" + """Check the properties of a stacked module""" module = Module1(size_a, size_b, val) module.eval() assert not module.training diff --git a/tests/test_operators.py b/tests/test_operators.py index b0b84101..b030508f 100644 --- a/tests/test_operators.py +++ b/tests/test_operators.py @@ -5,6 +5,7 @@ from hypothesis.strategies import lists from minitorch import MathTest +import minitorch from minitorch.operators import ( add, addLists, @@ -22,7 +23,6 @@ relu, relu_back, sigmoid, - sum, ) from .strategies import assert_close, small_floats @@ -33,7 +33,7 @@ @pytest.mark.task0_1 @given(small_floats, small_floats) def test_same_as_python(x: float, y: float) -> None: - "Check that the main operators all return the same value of the python version" + """Check that the main operators all return the same value of the python version""" assert_close(mul(x, y), x * y) assert_close(add(x, y), x + y) assert_close(neg(x), -x) @@ -69,7 +69,7 @@ def test_id(a: float) -> None: @pytest.mark.task0_1 @given(small_floats) def test_lt(a: float) -> None: - "Check that a - 1.0 is always less than a" + """Check that a - 1.0 is always less than a""" assert lt(a - 1.0, a) == 1.0 assert lt(a, a - 1.0) == 0.0 @@ -113,14 +113,13 @@ def test_sigmoid(a: float) -> None: @pytest.mark.task0_2 @given(small_floats, small_floats, small_floats) def test_transitive(a: float, b: float, c: float) -> None: - "Test the transitive property of less-than (a < b and b < c implies a < c)" + """Test the transitive property of less-than (a < b and b < c implies a < c)""" raise NotImplementedError("Need to include this file from past assignment.") @pytest.mark.task0_2 def test_symmetric() -> None: - """ - Write a test that ensures that :func:`minitorch.operators.mul` is symmetric, i.e. + """Write a test that ensures that :func:`minitorch.operators.mul` is symmetric, i.e. gives the same value regardless of the order of its input. """ raise NotImplementedError("Need to include this file from past assignment.") @@ -128,8 +127,7 @@ def test_symmetric() -> None: @pytest.mark.task0_2 def test_distribute() -> None: - r""" - Write a test that ensures that your operators distribute, i.e. + r"""Write a test that ensures that your operators distribute, i.e. 
:math:`z \times (x + y) = z \times x + z \times y` """ raise NotImplementedError("Need to include this file from past assignment.") @@ -137,9 +135,7 @@ def test_distribute() -> None: @pytest.mark.task0_2 def test_other() -> None: - """ - Write a test that ensures some other property holds for your functions. - """ + """Write a test that ensures some other property holds for your functions.""" raise NotImplementedError("Need to include this file from past assignment.") @@ -164,8 +160,7 @@ def test_zip_with(a: float, b: float, c: float, d: float) -> None: lists(small_floats, min_size=5, max_size=5), ) def test_sum_distribute(ls1: List[float], ls2: List[float]) -> None: - """ - Write a test that ensures that the sum of `ls1` plus the sum of `ls2` + """Write a test that ensures that the sum of `ls1` plus the sum of `ls2` is the same as the sum of each element of `ls1` plus each element of `ls2`. """ raise NotImplementedError("Need to include this file from past assignment.") @@ -174,7 +169,7 @@ def test_sum_distribute(ls1: List[float], ls2: List[float]) -> None: @pytest.mark.task0_3 @given(lists(small_floats)) def test_sum(ls: List[float]) -> None: - assert_close(sum(ls), sum(ls)) + assert_close(sum(ls), minitorch.operators.sum(ls)) @pytest.mark.task0_3