diff --git a/.github/workflows/benchmark_pr.yml b/.github/workflows/benchmark_pr.yml
new file mode 100644
index 00000000..4ad1b6a1
--- /dev/null
+++ b/.github/workflows/benchmark_pr.yml
@@ -0,0 +1,69 @@
+name: Benchmark a pull request
+
+on:
+  pull_request_target:
+    branches:
+      - main
+
+# NOTE(review): pull_request_target runs with a write-capable GITHUB_TOKEN and
+# the benchmark step below executes code from the PR head revision. If this
+# repository accepts PRs from untrusted forks, gate this workflow behind a
+# label or maintainer approval to avoid a pwn-request vulnerability.
+permissions:
+  pull-requests: write
+
+jobs:
+  generate_plots:
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v4
+      - uses: julia-actions/setup-julia@v1
+        with:
+          version: "1.9"
+      - uses: julia-actions/cache@v1
+      - name: Extract Package Name from Project.toml
+        id: extract-package-name
+        run: |
+          PACKAGE_NAME=$(grep "^name" Project.toml | sed 's/^name = "\(.*\)"$/\1/')
+          # ::set-output is deprecated and disabled; write to the GITHUB_OUTPUT file instead.
+          echo "package_name=$PACKAGE_NAME" >> "$GITHUB_OUTPUT"
+      - name: Build AirspeedVelocity
+        env:
+          JULIA_NUM_THREADS: 2
+        run: |
+          # Lightweight build step, as sometimes the runner runs out of memory:
+          julia -e 'ENV["JULIA_PKG_PRECOMPILE_AUTO"]=0; import Pkg; Pkg.add(;url="https://github.com/MilesCranmer/AirspeedVelocity.jl.git")'
+          julia -e 'ENV["JULIA_PKG_PRECOMPILE_AUTO"]=0; import Pkg; Pkg.build("AirspeedVelocity")'
+      - name: Add ~/.julia/bin to PATH
+        run: |
+          echo "$HOME/.julia/bin" >> "$GITHUB_PATH"
+      - name: Run benchmarks
+        run: |
+          echo $PATH
+          ls -l ~/.julia/bin
+          mkdir results
+          benchpkg ${{ steps.extract-package-name.outputs.package_name }} --rev="${{github.event.repository.default_branch}},${{github.event.pull_request.head.sha}}" --url=${{ github.event.repository.clone_url }} --bench-on="${{github.event.pull_request.head.sha}}" --output-dir=results/ --tune --exeflags="-O3 --threads=auto"
+      - name: Create markdown table from benchmarks
+        run: |
+          benchpkgtable ${{ steps.extract-package-name.outputs.package_name }} --rev="${{github.event.repository.default_branch}},${{github.event.pull_request.head.sha}}" --input-dir=results/ --ratio > table.md
+          echo '### Benchmark Results' > body.md
+          echo '' >> body.md
+          echo '' >> body.md
+          cat table.md >> body.md
+          echo '' >> body.md
+      - name: Find Comment
+        uses: peter-evans/find-comment@v2
+        id: fcbenchmark
+        with:
+          issue-number: ${{ github.event.pull_request.number }}
+          comment-author: 'github-actions[bot]'
+          body-includes: Benchmark Results
+
+      - name: Comment on PR
+        uses: peter-evans/create-or-update-comment@v3
+        with:
+          comment-id: ${{ steps.fcbenchmark.outputs.comment-id }}
+          issue-number: ${{ github.event.pull_request.number }}
+          body-path: body.md
+          edit-mode: replace
diff --git a/benchmark/Project.toml b/benchmark/Project.toml
new file mode 100644
index 00000000..05a4894b
--- /dev/null
+++ b/benchmark/Project.toml
@@ -0,0 +1,2 @@
+[deps]
+BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
diff --git a/benchmark/benchmarks.jl b/benchmark/benchmarks.jl
new file mode 100644
index 00000000..38bbfd07
--- /dev/null
+++ b/benchmark/benchmarks.jl
@@ -0,0 +1,62 @@
+using BenchmarkTools
+using PythonCall
+using PythonCall: pydel!, pyimport, pydict, pystr, pyrange
+
+const SUITE = BenchmarkGroup()  # root suite; the registrations below populate it
+
+function test_pydict_init()  # fill a Python dict with 1000 str(i) => i + random() entries; temporaries left to Python's GC
+    random = pyimport("random").random
+    x = pydict()
+    for i in pyrange(1000)
+        x[pystr(i)] = i + random()  # pystr(i) and i + random() each allocate short-lived Python objects
+    end
+    return x
+end
+
+SUITE["basic"]["julia"]["pydict"]["init"] = @benchmarkable test_pydict_init()  # NOTE(review): nested indexing assumes BenchmarkGroup auto-creates child groups — confirm with the pinned BenchmarkTools version
+
+function test_pydict_pydel()  # same workload as test_pydict_init, but eagerly frees each temporary with pydel!
+    random = pyimport("random").random
+    x = pydict()
+    for i in pyrange(1000)
+        k = pystr(i)
+        r = random()
+        v = i + r
+        x[k] = v
+        pydel!(k)
+        pydel!(r)
+        pydel!(v)
+        pydel!(i)  # also frees the loop variable yielded by pyrange iteration
+    end
+    return x
+end
+
+SUITE["basic"]["julia"]["pydict"]["pydel"] = @benchmarkable test_pydict_pydel()
+
+@generated function test_atpy(::Val{use_pydel}) where {use_pydel}  # same workload via the @py DSL; use_pydel splices in an explicit pydel! call at compile time
+    quote
+        @py begin
+            import random: random
+            x = {}
+            for i in range(1000)
+                x[str(i)] = i + random()
+                $(use_pydel ? :(@jl PythonCall.pydel!(i)) : :(nothing))  # compile-time switch: free i eagerly or not at all
+            end
+            x
+        end
+    end
+end
+
+SUITE["basic"]["@py"]["pydict"]["init"] = @benchmarkable test_atpy(Val(false))
+SUITE["basic"]["@py"]["pydict"]["pydel"] = @benchmarkable test_atpy(Val(true))
+
+
+include("gcbench.jl")
+using .GCBench: append_lots
+
+SUITE["gc"]["full"] = @benchmarkable(  # time a full GC sweep over many dead Python wrapper objects
+    GC.gc(true),
+    setup=(GC.gc(true); append_lots(size=159)),  # pre-clean, then allocate fresh garbage for the timed collection
+    seconds=30,  # full collections are slow; allow a generous time budget
+    evals=1,  # one GC per sample — a second eval would have nothing left to collect
+)
diff --git a/benchmark/gcbench.jl b/benchmark/gcbench.jl
new file mode 100644
index 00000000..d9e83070
--- /dev/null
+++ b/benchmark/gcbench.jl
@@ -0,0 +1,19 @@
+module GCBench
+
+using PythonCall
+
+"""
+    append_lots(; iters=100 * 1024, size=1596)
+
+Build and return a Python list containing `iters` freshly-allocated Python
+lists, each wrapping a random `Float64` vector of length `size`.
+"""
+function append_lots(; iters=100 * 1024, size=1596)
+    out = pylist()
+    foreach(1:iters) do _
+        out.append(pylist(rand(size)))
+    end
+    return out
+end
+
+end