diff --git a/.github/workflows/gateway-sharness.yml b/.github/workflows/gateway-sharness.yml index 9980779d1..a2058126e 100644 --- a/.github/workflows/gateway-sharness.yml +++ b/.github/workflows/gateway-sharness.yml @@ -16,9 +16,9 @@ jobs: shell: bash steps: - name: Setup Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: - go-version: 1.22.x + go-version: 1.23.x - name: Checkout boxo uses: actions/checkout@v3 with: @@ -34,10 +34,11 @@ jobs: run: | go mod edit -replace=github.com/ipfs/boxo=../boxo make mod_tidy - cat go.mod working-directory: kubo - name: Install sharness dependencies - run: make test_sharness_deps + run: | + find . -name go.mod -execdir go mod tidy \; + make test_sharness_deps working-directory: kubo - name: Run Kubo Sharness Tests run: find . -maxdepth 1 -name "*gateway*.sh" -print0 | xargs -0 -I {} bash -c "echo {}; {}" diff --git a/CHANGELOG.md b/CHANGELOG.md index 4a4c5d43a..49d1b9f45 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,16 +17,137 @@ The following emojis are used to highlight certain changes: ### Added 🛠 - New non variadic `NotifyNewBlock` function. This changes the `blockservice.Interface`. The new function avoids allocating a slice on each call when called with one block. +### Changed + +* 🛠 `blockstore` and `blockservice`'s `WriteThrough()` option now takes an "enabled" parameter: `WriteThrough(enabled bool)`. +
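+  For example, a minimal sketch of the new call sites (assuming an existing datastore `dstore` and exchange `exch`):
+
+  ```go
+  // WriteThrough(true) skips checking for already-existing blocks before writes.
+  bs := blockstore.NewBlockstore(dstore, blockstore.WriteThrough(true))
+  bserv := blockservice.New(bs, exch, blockservice.WriteThrough(true))
+  ```
+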
+### Removed + +### Fixed + +### Security + +## [v0.25.0] + +### Added + +- `routing/http/server`: added built-in Prometheus instrumentation to http delegated `/routing/v1/` endpoints, with custom buckets for response size and duration to match real-world data observed at [the `delegated-ipfs.dev` instance](https://docs.ipfs.tech/concepts/public-utilities/#delegated-routing). [#718](https://github.com/ipfs/boxo/pull/718) [#724](https://github.com/ipfs/boxo/pull/724) +- `routing/http/server`: added configurable routing timeout (`DefaultRoutingTimeout` being 30s) to prevent indefinite hangs during content/peer routing. Set a custom duration via `WithRoutingTimeout`. [#720](https://github.com/ipfs/boxo/pull/720) +- `routing/http/server`: exposes Prometheus metrics on `prometheus.DefaultRegisterer`; a custom one can be provided via `WithPrometheusRegistry`. [#722](https://github.com/ipfs/boxo/pull/722) +- `gateway`: `NewCacheBlockStore` and `NewCarBackend` will use `prometheus.DefaultRegisterer` when a custom one is not specified via `WithPrometheusRegistry`. [#722](https://github.com/ipfs/boxo/pull/722) +- `filestore`: added opt-in `WithMMapReader` option to `FileManager` to enable memory-mapped file reads. [#665](https://github.com/ipfs/boxo/pull/665) +- `bitswap/routing`: `ProviderQueryManager` does not require calling `Startup` separately from `New`. [#741](https://github.com/ipfs/boxo/pull/741) +- `bitswap/routing`: `ProviderQueryManager` does not use a lifecycle context. + +### Changed + +- `bitswap`, `routing`, `exchange` ([#641](https://github.com/ipfs/boxo/pull/641)): + - ✨ Bitswap is no longer in charge of providing blocks to the network: providing functionality is now handled by an `exchange/providing.Exchange`, meant to be used with `provider.System` so that all provides follow the same rules (multiple parts of the code were handling provides before). + - 🛠 `bitswap/client/internal/providerquerymanager` has been moved to `routing/providerquerymanager` where it belongs. In order to keep compatibility, Bitswap now receives a `routing.ContentDiscovery` parameter which implements `FindProvidersAsync(...)` and uses it to create a `providerquerymanager` with the default settings as before. Custom settings can be applied by manually wrapping a `ContentDiscovery` object in a custom `providerquerymanager`, passing that in as `ContentDiscovery` on initialization, and setting `bitswap.WithDefaultProviderQueryManager(false)` (to avoid re-wrapping it again); see the sketch after this list. + - The renovated `providerQueryManager` will trigger lookups until it manages to connect to `MaxProviders`. Previously it would look up at most `MaxInProcessRequests*MaxProviders`, and connection failures may have limited the actual number of providers found. + - 🛠 We have aligned our routing-related interfaces with the libp2p [`routing`](https://pkg.go.dev/github.com/libp2p/go-libp2p/core/routing#ContentRouting) ones, including in the `reprovider.System`. + - To obtain exactly the same behaviour as before (in particular, ensuring that new blocks are still provided), code that looked like: + + ```go + bswapnet := network.NewFromIpfsHost(host, contentRouter) + bswap := bitswap.New(p.ctx, bswapnet, blockstore) + bserv = blockservice.New(blockstore, bswap) + ``` + - becomes: + + ```go + // Create network: no contentRouter anymore + bswapnet := network.NewFromIpfsHost(host) + // Create Bitswap: a new "discovery" parameter, usually the "contentRouter" + // which does both discovery and providing. + bswap := bitswap.New(p.ctx, bswapnet, discovery, blockstore) + // A provider system that handles concurrent provides etc. "contentProvider" + // is usually the "contentRouter" which does both discovery and providing. + // "contentProvider" could be used directly without wrapping, but wrapping is + // recommended so that provides are queued and batched more efficiently. + provider := provider.New(datastore, provider.Online(contentProvider)) + // A wrapped providing exchange using the previous exchange and the provider. + exch := providing.New(bswap, provider) + + // Finally the blockservice + bserv := blockservice.New(blockstore, exch) + ... + ``` + + - The above is only necessary if content routing is needed. Otherwise: + + ```go + // Create network: no contentRouter anymore + bswapnet := network.NewFromIpfsHost(host) + // Create Bitswap: a new "discovery" parameter set to nil (disable content discovery) + bswap := bitswap.New(p.ctx, bswapnet, nil, blockstore) + // Finally the blockservice + bserv := blockservice.New(blockstore, bswap) + ``` +- `routing/http/client`: creating a delegated routing client with `New` now defaults to querying the delegated routing server with `DefaultProtocolFilter` ([IPIP-484](https://github.com/ipfs/specs/pull/484)) [#689](https://github.com/ipfs/boxo/pull/689) +- `bitswap/client`: Wait at least one broadcast interval before resending wants to a peer. Check for peers to rebroadcast to more often than once per broadcast interval. +- No longer using `github.com/jbenet/goprocess` to avoid requiring it in dependents. [#710](https://github.com/ipfs/boxo/pull/710) +- `pinning/remote/client`: Refactor remote pinning `Ls` to take a results channel instead of returning one. The previous `Ls` behavior is implemented by the `GoLs` function, which creates the channels, starts the goroutine that calls `Ls`, and returns the channels to the caller. [#738](https://github.com/ipfs/boxo/pull/738) +- updated go-libp2p to [v0.37.2](https://github.com/libp2p/go-libp2p/releases/tag/v0.37.2) +
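+A minimal sketch of the custom wiring (mirroring `TestCustomProviderQueryManager` in `bitswap/client/bitswap_with_sessions_test.go`; `ctx`, `host`, `router`, and `bstore` are assumed to exist):
+
+```go
+bswapnet := network.NewFromIpfsHost(host)
+// Wrap the content router (which implements FindProvidersAsync) in a
+// ProviderQueryManager; custom options can be passed to New here.
+pqm, err := providerquerymanager.New(bswapnet, router)
+if err != nil {
+    ...
+}
+defer pqm.Close()
+// Pass the wrapped router as the discovery parameter and disable the
+// default wrapping so it is not wrapped a second time.
+bswap := bitswap.New(ctx, bswapnet, pqm, bstore,
+    bitswap.WithClientOption(client.WithDefaultProviderQueryManager(false)))
+```
+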
+### Fixed + +- Do not erroneously update the state of sent wants when a send did not happen because the peer disconnected. [#452](https://github.com/ipfs/boxo/pull/452) + +## [v0.24.3] + +### Changed + +- `go.mod` updates + +### Fixed + +- `bitswap/client` no longer logs `"Received provider X for cid Y not requested"` to ERROR level, moved to DEBUG [#771](https://github.com/ipfs/boxo/pull/711) + +## [v0.24.2] + +### Changed + +- updated go-libp2p to [v0.37.0](https://github.com/libp2p/go-libp2p/releases/tag/v0.37.0) +- `ipns/pb`: removed use of deprecated `Exporter` (SA1019, [golang/protobuf#1640](https://github.com/golang/protobuf/issues/1640), [9a7055](https://github.com/ipfs/boxo/pull/699/commits/9a7055e444527d5aad3187503a1b84bcae44f7b9)) + +### Fixed + +- `bitswap/client`: fix panic if current live count is greater than broadcast limit [#702](https://github.com/ipfs/boxo/pull/702) + +## [v0.24.1] + +### Changed + +- `routing/http/client`: creating a delegated routing client with `New` now defaults to querying the delegated routing server with `DefaultProtocolFilter` ([IPIP-484](https://github.com/ipfs/specs/pull/484)) [#689](https://github.com/ipfs/boxo/pull/689) +- updated go-libp2p to [v0.36.5](https://github.com/libp2p/go-libp2p/releases/tag/v0.36.5) +- updated dependencies [#693](https://github.com/ipfs/boxo/pull/693) +- update `go-libp2p-kad-dht` to [v0.27.0](https://github.com/libp2p/go-libp2p-kad-dht/releases/tag/v0.27.0) + +### Fixed + +- `routing/http/client`: optional address and protocol filter parameters from [IPIP-484](https://github.com/ipfs/specs/pull/484) use human-readable `,` instead of `%2C`. [#688](https://github.com/ipfs/boxo/pull/688) +- `bitswap/client`: Cleanup live wants when wants are canceled. This prevents live wants from continuing to get rebroadcast even after the wants are canceled. [#690](https://github.com/ipfs/boxo/pull/690) +- Fix problem adding invalid CID to exhausted wants list, resulting in a possible performance issue. [#692](https://github.com/ipfs/boxo/pull/692) + +## [v0.24.0] + +### Added + * `boxo/bitswap/server`: * A new [`WithWantHaveReplaceSize(n)`](https://pkg.go.dev/github.com/ipfs/boxo/bitswap/server/#WithWantHaveReplaceSize) option can be used with `bitswap.New` to fine-tune cost-vs-performance. It sets the maximum size of a block in bytes up to which the bitswap server will replace a WantHave with a WantBlock response. Setting this to 0 disables this WantHave replacement and means that block sizes are not read when processing WantHave requests. [#672](https://github.com/ipfs/boxo/pull/672) -- `routing/http`: added support for address and protocol filtering to the delegated routing server ([IPIP-484](https://github.com/ipfs/specs/pull/484)) [#671](https://github.com/ipfs/boxo/pull/671) +* `routing/http`: + * added support for address and protocol filtering to the delegated routing server ([IPIP-484](https://github.com/ipfs/specs/pull/484)) [#671](https://github.com/ipfs/boxo/pull/671) [#678](https://github.com/ipfs/boxo/pull/678) + * added support for address and protocol filtering to the delegated routing client ([IPIP-484](https://github.com/ipfs/specs/pull/484)) [#678](https://github.com/ipfs/boxo/pull/678). To add filtering to the client, use the [`WithFilterAddrs`](https://pkg.go.dev/github.com/ipfs/boxo/routing/http/client#WithFilterAddrs) and [`WithFilterProtocols`](https://pkg.go.dev/github.com/ipfs/boxo/routing/http/client#WithFilterProtocols) options when creating the client. Client-side filtering for servers that don't support filtering is enabled by default. To disable it, use the [`disableLocalFiltering`](https://pkg.go.dev/github.com/ipfs/boxo/routing/http/client#disableLocalFiltering) option when creating the client.
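+    For instance, a minimal sketch of a client with filters (the endpoint URL is illustrative; filter values follow IPIP-484, and the exact option signatures are assumed rather than verified here):
+
+    ```go
+    c, err := client.New(
+        "https://delegated-ipfs.dev", // example endpoint
+        client.WithFilterAddrs("webtransport", "wss"),   // keep only these address types
+        client.WithFilterProtocols("transport-bitswap"), // keep only these transfer protocols
+    )
+    if err != nil {
+        ...
+    }
+    ```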
### Changed ### Removed ### Fixed -= `unixfs/hamt` Log error instead of panic if both link and shard are nil [#393](https://github.com/ipfs/boxo/pull/393) + +- `unixfs/hamt`: Log error instead of panic if both link and shard are nil [#393](https://github.com/ipfs/boxo/pull/393) +- `pinner/dspinner`: do not hang when listing keys and the `out` channel is no longer read [#727](https://github.com/ipfs/boxo/pull/727) ### Security @@ -62,6 +183,7 @@ The following emojis are used to highlight certain changes: - `bitswap/client` fix memory leak in BlockPresenceManager due to unlimited map growth. [#636](https://github.com/ipfs/boxo/pull/636) - `bitswap/network` fixed race condition when a timeout occurred before hole punching completed while establishing a first-time stream to a peer behind a NAT [#651](https://github.com/ipfs/boxo/pull/651) - `bitswap`: wantlist overflow handling now cancels existing entries to make room for newer entries. This fix prevents the wantlist from filling up with CIDs that the server does not have. [#629](https://github.com/ipfs/boxo/pull/629) +- 🛠 `bitswap` & `bitswap/server` no longer provide to content routers. Instead, use the `provider` package: it uses a datastore queue and batches calls to `ProvideMany`.
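+
+  A minimal sketch of the replacement wiring (mirroring the v0.25.0 migration example above; `datastore`, `contentRouter`, and an existing exchange `bswap` are assumed):
+
+  ```go
+  // Provides are queued in the datastore and announced in batches via
+  // ProvideMany, instead of being sent one-by-one by the bitswap server.
+  prov := provider.New(datastore, provider.Online(contentRouter))
+  exch := providing.New(bswap, prov) // provides new blocks through prov
+  ```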
## [v0.21.0] diff --git a/bitswap/benchmarks_test.go b/bitswap/benchmarks_test.go index bd8f342ea..d1930c900 100644 --- a/bitswap/benchmarks_test.go +++ b/bitswap/benchmarks_test.go @@ -12,17 +12,16 @@ import ( "testing" "time" - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-test/random" - protocol "github.com/libp2p/go-libp2p/core/protocol" - "github.com/ipfs/boxo/bitswap" bsnet "github.com/ipfs/boxo/bitswap/network" testinstance "github.com/ipfs/boxo/bitswap/testinstance" tn "github.com/ipfs/boxo/bitswap/testnet" mockrouting "github.com/ipfs/boxo/routing/mock" + blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" delay "github.com/ipfs/go-ipfs-delay" + "github.com/ipfs/go-test/random" + protocol "github.com/libp2p/go-libp2p/core/protocol" ) type fetchFunc func(b *testing.B, bs *bitswap.Bitswap, ks []cid.Cid) @@ -135,6 +134,7 @@ func BenchmarkFetchFromOldBitswap(b *testing.B) { benchmarkLog = nil fixedDelay := delay.Fixed(10 * time.Millisecond) bstoreLatency := time.Duration(0) + router := mockrouting.NewServer() for _, bch := range mixedBenches { b.Run(bch.name, func(b *testing.B) { @@ -142,17 +142,17 @@ func BenchmarkFetchFromOldBitswap(b *testing.B) { oldSeedCount := bch.oldSeedCount newSeedCount := bch.nodeCount - (fetcherCount + oldSeedCount) - net := tn.VirtualNetwork(mockrouting.NewServer(), fixedDelay) + net := tn.VirtualNetwork(fixedDelay) // Simulate an older Bitswap node (old protocol ID) that doesn't // send DONT_HAVE responses oldProtocol := []protocol.ID{bsnet.ProtocolBitswapOneOne} oldNetOpts := []bsnet.NetOpt{bsnet.SupportedProtocols(oldProtocol)} oldBsOpts := []bitswap.Option{bitswap.SetSendDontHaves(false)} - oldNodeGenerator := testinstance.NewTestInstanceGenerator(net, oldNetOpts, oldBsOpts) + oldNodeGenerator := testinstance.NewTestInstanceGenerator(net, router, oldNetOpts, oldBsOpts) // Regular new Bitswap node - newNodeGenerator := testinstance.NewTestInstanceGenerator(net, nil, nil) + newNodeGenerator := testinstance.NewTestInstanceGenerator(net, router, nil, nil) var instances []testinstance.Instance // Create new nodes (fetchers + seeds) @@ -294,9 +294,10 @@ func BenchmarkDatacenterMultiLeechMultiSeed(b *testing.B) { numblks := 1000 for i := 0; i < b.N; i++ { - net := tn.RateLimitedVirtualNetwork(mockrouting.NewServer(), d, rateLimitGenerator) + net := tn.RateLimitedVirtualNetwork(d, rateLimitGenerator) - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + router := mockrouting.NewServer() + ig := testinstance.NewTestInstanceGenerator(net, router, nil, nil) defer ig.Close() instances := ig.Instances(numnodes) @@ -312,9 +313,9 @@ func BenchmarkDatacenterMultiLeechMultiSeed(b *testing.B) { func subtestDistributeAndFetch(b *testing.B, numnodes, numblks int, d delay.D, bstoreLatency time.Duration, df distFunc, ff fetchFunc) { for i := 0; i < b.N; i++ { - net := tn.VirtualNetwork(mockrouting.NewServer(), d) - - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + net := tn.VirtualNetwork(d) + router := mockrouting.NewServer() + ig := testinstance.NewTestInstanceGenerator(net, router, nil, nil) instances := ig.Instances(numnodes) rootBlock := random.BlocksOfSize(1, rootBlockSize) @@ -327,9 +328,9 @@ func subtestDistributeAndFetch(b *testing.B, numnodes, numblks int, d delay.D, b func subtestDistributeAndFetchRateLimited(b *testing.B, numnodes, numblks int, d delay.D, rateLimitGenerator tn.RateLimitGenerator, blockSize int64, bstoreLatency time.Duration, df distFunc, ff fetchFunc) { for i := 0; i < b.N; i++ { - net := 
tn.RateLimitedVirtualNetwork(mockrouting.NewServer(), d, rateLimitGenerator) - - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + net := tn.RateLimitedVirtualNetwork(d, rateLimitGenerator) + router := mockrouting.NewServer() + ig := testinstance.NewTestInstanceGenerator(net, router, nil, nil) defer ig.Close() instances := ig.Instances(numnodes) @@ -437,7 +438,7 @@ func runDistribution(b *testing.B, instances []testinstance.Instance, blocks []b func allToAll(b *testing.B, provs []testinstance.Instance, blocks []blocks.Block) { for _, p := range provs { - if err := p.Blockstore().PutMany(context.Background(), blocks); err != nil { + if err := p.Blockstore.PutMany(context.Background(), blocks); err != nil { b.Fatal(err) } } @@ -452,10 +453,10 @@ func overlap1(b *testing.B, provs []testinstance.Instance, blks []blocks.Block) bill := provs[0] jeff := provs[1] - if err := bill.Blockstore().PutMany(context.Background(), blks[:75]); err != nil { + if err := bill.Blockstore.PutMany(context.Background(), blks[:75]); err != nil { b.Fatal(err) } - if err := jeff.Blockstore().PutMany(context.Background(), blks[25:]); err != nil { + if err := jeff.Blockstore.PutMany(context.Background(), blks[25:]); err != nil { b.Fatal(err) } } @@ -473,12 +474,12 @@ func overlap2(b *testing.B, provs []testinstance.Instance, blks []blocks.Block) even := i%2 == 0 third := i%3 == 0 if third || even { - if err := bill.Blockstore().Put(context.Background(), blk); err != nil { + if err := bill.Blockstore.Put(context.Background(), blk); err != nil { b.Fatal(err) } } if third || !even { - if err := jeff.Blockstore().Put(context.Background(), blk); err != nil { + if err := jeff.Blockstore.Put(context.Background(), blk); err != nil { b.Fatal(err) } } @@ -490,7 +491,7 @@ func overlap2(b *testing.B, provs []testinstance.Instance, blks []blocks.Block) // but we're mostly just testing performance of the sync algorithm func onePeerPerBlock(b *testing.B, provs []testinstance.Instance, blks []blocks.Block) { for _, blk := range blks { - err := provs[rand.Intn(len(provs))].Blockstore().Put(context.Background(), blk) + err := provs[rand.Intn(len(provs))].Blockstore.Put(context.Background(), blk) if err != nil { b.Fatal(err) } diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index 74ef79014..9ba51662e 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -5,7 +5,6 @@ import ( "fmt" "github.com/ipfs/boxo/bitswap/client" - "github.com/ipfs/boxo/bitswap/internal/defaults" "github.com/ipfs/boxo/bitswap/message" "github.com/ipfs/boxo/bitswap/network" "github.com/ipfs/boxo/bitswap/server" @@ -45,9 +44,8 @@ type bitswap interface { } var ( - _ exchange.SessionExchange = (*Bitswap)(nil) - _ bitswap = (*Bitswap)(nil) - HasBlockBufferSize = defaults.HasBlockBufferSize + _ exchange.SessionExchange = (*Bitswap)(nil) + _ bitswap = (*Bitswap)(nil) ) type Bitswap struct { @@ -58,7 +56,7 @@ type Bitswap struct { net network.BitSwapNetwork } -func New(ctx context.Context, net network.BitSwapNetwork, bstore blockstore.Blockstore, options ...Option) *Bitswap { +func New(ctx context.Context, net network.BitSwapNetwork, providerFinder client.ProviderFinder, bstore blockstore.Blockstore, options ...Option) *Bitswap { bs := &Bitswap{ net: net, } @@ -85,14 +83,10 @@ func New(ctx context.Context, net network.BitSwapNetwork, bstore blockstore.Bloc serverOptions = append(serverOptions, server.WithTracer(tracer)) } - if HasBlockBufferSize != defaults.HasBlockBufferSize { - serverOptions = append(serverOptions, 
server.HasBlockBufferSize(HasBlockBufferSize)) - } - ctx = metrics.CtxSubScope(ctx, "bitswap") bs.Server = server.New(ctx, net, bstore, serverOptions...) - bs.Client = client.New(ctx, net, bstore, append(clientOptions, client.WithBlockReceivedNotifier(bs.Server))...) + bs.Client = client.New(ctx, net, providerFinder, bstore, append(clientOptions, client.WithBlockReceivedNotifier(bs.Server))...) net.Start(bs) // use the polyfill receiver to log received errors and trace messages only once return bs @@ -122,7 +116,6 @@ type Stat struct { MessagesReceived uint64 BlocksSent uint64 DataSent uint64 - ProvideBufLen int } func (bs *Bitswap) Stat() (*Stat, error) { @@ -145,16 +138,14 @@ func (bs *Bitswap) Stat() (*Stat, error) { Peers: ss.Peers, BlocksSent: ss.BlocksSent, DataSent: ss.DataSent, - ProvideBufLen: ss.ProvideBufLen, }, nil } func (bs *Bitswap) Close() error { bs.net.Stop() - return multierr.Combine( - bs.Client.Close(), - bs.Server.Close(), - ) + bs.Client.Close() + bs.Server.Close() + return nil } func (bs *Bitswap) WantlistForPeer(p peer.ID) []cid.Cid { diff --git a/bitswap/bitswap_test.go b/bitswap/bitswap_test.go index 85055879c..2fb32aa61 100644 --- a/bitswap/bitswap_test.go +++ b/bitswap/bitswap_test.go @@ -36,7 +36,7 @@ func isCI() bool { func addBlock(t *testing.T, ctx context.Context, inst testinstance.Instance, blk blocks.Block) { t.Helper() - err := inst.Blockstore().Put(ctx, blk) + err := inst.Blockstore.Put(ctx, blk) if err != nil { t.Fatal(err) } @@ -51,8 +51,9 @@ func addBlock(t *testing.T, ctx context.Context, inst testinstance.Instance, blk const kNetworkDelay = 0 * time.Millisecond func TestClose(t *testing.T) { - vnet := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) + vnet := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) + router := mockrouting.NewServer() + ig := testinstance.NewTestInstanceGenerator(vnet, router, nil, nil) defer ig.Close() block := random.BlocksOfSize(1, blockSize)[0] bitswap := ig.Next() @@ -65,14 +66,14 @@ func TestClose(t *testing.T) { } func TestProviderForKeyButNetworkCannotFind(t *testing.T) { // TODO revisit this - rs := mockrouting.NewServer() - net := tn.VirtualNetwork(rs, delay.Fixed(kNetworkDelay)) - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) + router := mockrouting.NewServer() + ig := testinstance.NewTestInstanceGenerator(net, router, nil, nil) defer ig.Close() block := blocks.NewBlock([]byte("block")) pinfo := p2ptestutil.RandTestBogusIdentityOrFatal(t) - err := rs.Client(pinfo).Provide(context.Background(), block.Cid(), true) // but not on network + err := router.Client(pinfo).Provide(context.Background(), block.Cid(), true) // but not on network if err != nil { t.Fatal(err) } @@ -90,9 +91,10 @@ func TestProviderForKeyButNetworkCannotFind(t *testing.T) { // TODO revisit this } func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) block := blocks.NewBlock([]byte("block")) - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + router := mockrouting.NewServer() + ig := testinstance.NewTestInstanceGenerator(net, router, nil, nil) defer ig.Close() peers := ig.Instances(2) @@ -118,10 +120,11 @@ func TestGetBlockFromPeerAfterPeerAnnounces(t *testing.T) { } func TestDoesNotProvideWhenConfiguredNotTo(t *testing.T) { - net := 
tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) block := blocks.NewBlock([]byte("block")) - bsOpts := []bitswap.Option{bitswap.ProvideEnabled(false), bitswap.ProviderSearchDelay(50 * time.Millisecond)} - ig := testinstance.NewTestInstanceGenerator(net, nil, bsOpts) + bsOpts := []bitswap.Option{bitswap.ProviderSearchDelay(50 * time.Millisecond)} + router := mockrouting.NewServer() + ig := testinstance.NewTestInstanceGenerator(net, router, nil, bsOpts) defer ig.Close() hasBlock := ig.Next() @@ -150,12 +153,13 @@ func TestDoesNotProvideWhenConfiguredNotTo(t *testing.T) { // Tests that a received block is not stored in the blockstore if the block was // not requested by the client func TestUnwantedBlockNotAdded(t *testing.T) { - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) block := blocks.NewBlock([]byte("block")) bsMessage := bsmsg.New(true) bsMessage.AddBlock(block) - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + router := mockrouting.NewServer() + ig := testinstance.NewTestInstanceGenerator(net, router, nil, nil) defer ig.Close() peers := ig.Instances(2) @@ -170,9 +174,9 @@ func TestUnwantedBlockNotAdded(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() - doesNotWantBlock.Exchange.ReceiveMessage(ctx, hasBlock.Peer, bsMessage) + doesNotWantBlock.Exchange.ReceiveMessage(ctx, hasBlock.Identity.ID(), bsMessage) - blockInStore, err := doesNotWantBlock.Blockstore().Has(ctx, block.Cid()) + blockInStore, err := doesNotWantBlock.Blockstore.Has(ctx, block.Cid()) if err != nil || blockInStore { t.Fatal("Unwanted block added to block store") } @@ -186,10 +190,11 @@ func TestUnwantedBlockNotAdded(t *testing.T) { // (because the live request queue is full) func TestPendingBlockAdded(t *testing.T) { ctx := context.Background() - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) sessionBroadcastWantCapacity := 4 - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + router := mockrouting.NewServer() + ig := testinstance.NewTestInstanceGenerator(net, router, nil, nil) defer ig.Close() instance := ig.Instances(1)[0] @@ -277,8 +282,9 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { if testing.Short() { t.SkipNow() } - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - ig := testinstance.NewTestInstanceGenerator(net, nil, []bitswap.Option{ + net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) + router := mockrouting.NewServer() + ig := testinstance.NewTestInstanceGenerator(net, router, nil, []bitswap.Option{ bitswap.TaskWorkerCount(5), bitswap.EngineTaskWorkerCount(5), bitswap.MaxOutstandingBytesPerPeer(1 << 20), @@ -333,16 +339,17 @@ func TestSendToWantingPeer(t *testing.T) { t.SkipNow() } - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) + router := mockrouting.NewServer() + ig := testinstance.NewTestInstanceGenerator(net, router, nil, nil) defer ig.Close() peers := ig.Instances(2) peerA := peers[0] peerB := peers[1] - t.Logf("Session %v\n", peerA.Peer) - t.Logf("Session %v\n", peerB.Peer) + t.Logf("Session %v\n", peerA.Identity.ID()) + t.Logf("Session %v\n", peerB.Identity.ID()) waitTime := 
time.Second * 5 @@ -370,8 +377,9 @@ func TestSendToWantingPeer(t *testing.T) { } func TestEmptyKey(t *testing.T) { - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) + router := mockrouting.NewServer() + ig := testinstance.NewTestInstanceGenerator(net, router, nil, nil) defer ig.Close() bs := ig.Instances(1)[0].Exchange @@ -403,8 +411,9 @@ func assertStat(t *testing.T, st *bitswap.Stat, sblks, rblks, sdata, rdata uint6 } func TestBasicBitswap(t *testing.T) { - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) + router := mockrouting.NewServer() + ig := testinstance.NewTestInstanceGenerator(net, router, nil, nil) defer ig.Close() t.Log("Test a one node trying to get one block from another") @@ -428,7 +437,7 @@ func TestBasicBitswap(t *testing.T) { // When second peer receives block, it should send out a cancel, so third // peer should no longer keep second peer's want if err = tu.WaitFor(ctx, func() error { - if len(instances[2].Exchange.WantlistForPeer(instances[1].Peer)) != 0 { + if len(instances[2].Exchange.WantlistForPeer(instances[1].Identity.ID())) != 0 { return errors.New("should have no items in other peers wantlist") } if len(instances[1].Exchange.GetWantlist()) != 0 { @@ -474,8 +483,9 @@ func TestBasicBitswap(t *testing.T) { } func TestDoubleGet(t *testing.T) { - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) + router := mockrouting.NewServer() + ig := testinstance.NewTestInstanceGenerator(net, router, nil, nil) defer ig.Close() t.Log("Test a one node trying to get one block from another") @@ -518,7 +528,7 @@ func TestDoubleGet(t *testing.T) { } t.Log(blk) case <-time.After(time.Second * 5): - p1wl := instances[0].Exchange.WantlistForPeer(instances[1].Peer) + p1wl := instances[0].Exchange.WantlistForPeer(instances[1].Identity.ID()) if len(p1wl) != 1 { t.Logf("wantlist view didnt have 1 item (had %d)", len(p1wl)) } else if !p1wl[0].Equals(blocks[0].Cid()) { @@ -538,8 +548,9 @@ func TestDoubleGet(t *testing.T) { } func TestWantlistCleanup(t *testing.T) { - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) + router := mockrouting.NewServer() + ig := testinstance.NewTestInstanceGenerator(net, router, nil, nil) defer ig.Close() instances := ig.Instances(2) @@ -659,8 +670,9 @@ func newReceipt(sent, recv, exchanged uint64) *server.Receipt { } func TestBitswapLedgerOneWay(t *testing.T) { - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) + router := mockrouting.NewServer() + ig := testinstance.NewTestInstanceGenerator(net, router, nil, nil) defer ig.Close() t.Log("Test ledgers match when one peer sends block to another") @@ -676,8 +688,8 @@ func TestBitswapLedgerOneWay(t *testing.T) { t.Fatal(err) } - ra := instances[0].Exchange.LedgerForPeer(instances[1].Peer) - rb := instances[1].Exchange.LedgerForPeer(instances[0].Peer) + ra := 
instances[0].Exchange.LedgerForPeer(instances[1].Identity.ID()) + rb := instances[1].Exchange.LedgerForPeer(instances[0].Identity.ID()) // compare peer ledger receipts err = assertLedgerMatch(ra, rb) @@ -707,8 +719,9 @@ func TestBitswapLedgerOneWay(t *testing.T) { } func TestBitswapLedgerTwoWay(t *testing.T) { - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) + router := mockrouting.NewServer() + ig := testinstance.NewTestInstanceGenerator(net, router, nil, nil) defer ig.Close() t.Log("Test ledgers match when two peers send one block to each other") @@ -732,8 +745,8 @@ func TestBitswapLedgerTwoWay(t *testing.T) { t.Fatal(err) } - ra := instances[0].Exchange.LedgerForPeer(instances[1].Peer) - rb := instances[1].Exchange.LedgerForPeer(instances[0].Peer) + ra := instances[0].Exchange.LedgerForPeer(instances[1].Identity.ID()) + rb := instances[1].Exchange.LedgerForPeer(instances[0].Identity.ID()) // compare peer ledger receipts err = assertLedgerMatch(ra, rb) @@ -795,9 +808,10 @@ func (tsl *testingScoreLedger) Stop() { // Tests start and stop of a custom decision logic func TestWithScoreLedger(t *testing.T) { tsl := newTestingScoreLedger() - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(kNetworkDelay)) + net := tn.VirtualNetwork(delay.Fixed(kNetworkDelay)) + router := mockrouting.NewServer() bsOpts := []bitswap.Option{bitswap.WithScoreLedger(tsl)} - ig := testinstance.NewTestInstanceGenerator(net, nil, bsOpts) + ig := testinstance.NewTestInstanceGenerator(net, router, nil, bsOpts) defer ig.Close() i := ig.Next() defer i.Exchange.Close() diff --git a/bitswap/client/bitswap_with_sessions_test.go b/bitswap/client/bitswap_with_sessions_test.go index 6241865ef..2fee84217 100644 --- a/bitswap/client/bitswap_with_sessions_test.go +++ b/bitswap/client/bitswap_with_sessions_test.go @@ -8,11 +8,13 @@ import ( "time" "github.com/ipfs/boxo/bitswap" + "github.com/ipfs/boxo/bitswap/client" "github.com/ipfs/boxo/bitswap/client/internal/session" "github.com/ipfs/boxo/bitswap/client/traceability" testinstance "github.com/ipfs/boxo/bitswap/testinstance" tn "github.com/ipfs/boxo/bitswap/testnet" mockrouting "github.com/ipfs/boxo/routing/mock" + "github.com/ipfs/boxo/routing/providerquerymanager" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" delay "github.com/ipfs/go-ipfs-delay" @@ -26,12 +28,12 @@ const blockSize = 4 func getVirtualNetwork() tn.Network { // FIXME: the tests are really sensitive to the network delay. 
fix them to work // well under varying conditions - return tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0)) + return tn.VirtualNetwork(delay.Fixed(0)) } func addBlock(t *testing.T, ctx context.Context, inst testinstance.Instance, blk blocks.Block) { t.Helper() - err := inst.Blockstore().Put(ctx, blk) + err := inst.Blockstore.Put(ctx, blk) if err != nil { t.Fatal(err) } @@ -39,6 +41,10 @@ func addBlock(t *testing.T, ctx context.Context, inst testinstance.Instance, blk if err != nil { t.Fatal(err) } + err = inst.Routing.Provide(ctx, blk.Cid(), true) + if err != nil { + t.Fatal(err) + } } func TestBasicSessions(t *testing.T) { @@ -46,7 +52,8 @@ func TestBasicSessions(t *testing.T) { defer cancel() vnet := getVirtualNetwork() - ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) + router := mockrouting.NewServer() + ig := testinstance.NewTestInstanceGenerator(vnet, router, nil, nil) defer ig.Close() block := random.BlocksOfSize(1, blockSize)[0] @@ -56,7 +63,7 @@ func TestBasicSessions(t *testing.T) { b := inst[1] // Add a block to Peer B - if err := b.Blockstore().Put(ctx, block); err != nil { + if err := b.Blockstore.Put(ctx, block); err != nil { t.Fatal(err) } @@ -78,7 +85,7 @@ func TestBasicSessions(t *testing.T) { t.Fatal("did not get tracable block") } - if traceBlock.From != b.Peer { + if traceBlock.From != b.Identity.ID() { t.Fatal("should have received block from peer B, did not") } } @@ -107,19 +114,77 @@ func assertBlockListsFrom(from peer.ID, got, exp []blocks.Block) error { return nil } +// TestCustomProviderQueryManager tests that nothing breaks if we use a custom +// PQM when creating bitswap. +func TestCustomProviderQueryManager(t *testing.T) { + vnet := getVirtualNetwork() + router := mockrouting.NewServer() + ig := testinstance.NewTestInstanceGenerator(vnet, router, nil, nil) + defer ig.Close() + + block := random.BlocksOfSize(1, blockSize)[0] + a := ig.Next() + b := ig.Next() + + // Replace bitswap in instance a with our customized one. + pqm, err := providerquerymanager.New(a.Adapter, router.Client(a.Identity)) + if err != nil { + t.Fatal(err) + } + defer pqm.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + bs := bitswap.New(ctx, a.Adapter, pqm, a.Blockstore, + bitswap.WithClientOption(client.WithDefaultProviderQueryManager(false))) + a.Exchange.Close() // close old to be sure. + a.Exchange = bs + // Connect instances only after bitswap exists. 
+ testinstance.ConnectInstances([]testinstance.Instance{a, b}) + + // Add a block to Peer B + if err := b.Blockstore.Put(ctx, block); err != nil { + t.Fatal(err) + } + + // Create a session on Peer A + sesa := a.Exchange.NewSession(ctx) + + // Get the block + blkout, err := sesa.GetBlock(ctx, block.Cid()) + if err != nil { + t.Fatal(err) + } + + if !blkout.Cid().Equals(block.Cid()) { + t.Fatal("got wrong block") + } + + traceBlock, ok := blkout.(traceability.Block) + if !ok { + t.Fatal("did not get tracable block") + } + + if traceBlock.From != b.Identity.ID() { + t.Fatal("should have received block from peer B, did not") + } +} + func TestSessionBetweenPeers(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - vnet := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(time.Millisecond)) - ig := testinstance.NewTestInstanceGenerator(vnet, nil, []bitswap.Option{bitswap.SetSimulateDontHavesOnTimeout(false)}) + vnet := tn.VirtualNetwork(delay.Fixed(time.Millisecond)) + router := mockrouting.NewServer() + ig := testinstance.NewTestInstanceGenerator(vnet, router, nil, []bitswap.Option{bitswap.SetSimulateDontHavesOnTimeout(false)}) defer ig.Close() inst := ig.Instances(10) // Add 101 blocks to Peer A blks := random.BlocksOfSize(101, blockSize) - if err := inst[0].Blockstore().PutMany(ctx, blks); err != nil { + if err := inst[0].Blockstore.PutMany(ctx, blks); err != nil { t.Fatal(err) } @@ -147,7 +212,7 @@ func TestSessionBetweenPeers(t *testing.T) { for b := range ch { got = append(got, b) } - if err := assertBlockListsFrom(inst[0].Peer, got, blks[i*10:(i+1)*10]); err != nil { + if err := assertBlockListsFrom(inst[0].Identity.ID(), got, blks[i*10:(i+1)*10]); err != nil { t.Fatal(err) } } @@ -171,7 +236,8 @@ func TestSessionSplitFetch(t *testing.T) { defer cancel() vnet := getVirtualNetwork() - ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) + router := mockrouting.NewServer() + ig := testinstance.NewTestInstanceGenerator(vnet, router, nil, nil) defer ig.Close() inst := ig.Instances(11) @@ -179,7 +245,7 @@ func TestSessionSplitFetch(t *testing.T) { // Add 10 distinct blocks to each of 10 peers blks := random.BlocksOfSize(100, blockSize) for i := 0; i < 10; i++ { - if err := inst[i].Blockstore().PutMany(ctx, blks[i*10:(i+1)*10]); err != nil { + if err := inst[i].Blockstore.PutMany(ctx, blks[i*10:(i+1)*10]); err != nil { t.Fatal(err) } } @@ -203,7 +269,7 @@ func TestSessionSplitFetch(t *testing.T) { for b := range ch { got = append(got, b) } - if err := assertBlockListsFrom(inst[i].Peer, got, blks[i*10:(i+1)*10]); err != nil { + if err := assertBlockListsFrom(inst[i].Identity.ID(), got, blks[i*10:(i+1)*10]); err != nil { t.Fatal(err) } } @@ -214,7 +280,8 @@ func TestFetchNotConnected(t *testing.T) { defer cancel() vnet := getVirtualNetwork() - ig := testinstance.NewTestInstanceGenerator(vnet, nil, []bitswap.Option{bitswap.ProviderSearchDelay(10 * time.Millisecond)}) + router := mockrouting.NewServer() + ig := testinstance.NewTestInstanceGenerator(vnet, router, nil, []bitswap.Option{bitswap.ProviderSearchDelay(10 * time.Millisecond)}) defer ig.Close() other := ig.Next() @@ -236,7 +303,6 @@ func TestFetchNotConnected(t *testing.T) { thisNode := ig.Next() ses := thisNode.Exchange.NewSession(ctx).(*session.Session) ses.SetBaseTickDelay(time.Millisecond * 10) - ch, err := ses.GetBlocks(ctx, cids) if err != nil { t.Fatal(err) @@ -246,7 +312,7 @@ func TestFetchNotConnected(t *testing.T) { for b := range ch { got = append(got, b) } - if err := 
assertBlockListsFrom(other.Peer, got, blks); err != nil { + if err := assertBlockListsFrom(other.Identity.ID(), got, blks); err != nil { t.Fatal(err) } } @@ -256,7 +322,8 @@ func TestFetchAfterDisconnect(t *testing.T) { defer cancel() vnet := getVirtualNetwork() - ig := testinstance.NewTestInstanceGenerator(vnet, nil, []bitswap.Option{ + router := mockrouting.NewServer() + ig := testinstance.NewTestInstanceGenerator(vnet, router, nil, []bitswap.Option{ bitswap.ProviderSearchDelay(10 * time.Millisecond), bitswap.RebroadcastDelay(delay.Fixed(15 * time.Millisecond)), }) @@ -294,12 +361,12 @@ func TestFetchAfterDisconnect(t *testing.T) { got = append(got, b) } - if err := assertBlockListsFrom(peerA.Peer, got, blks[:5]); err != nil { + if err := assertBlockListsFrom(peerA.Identity.ID(), got, blks[:5]); err != nil { t.Fatal(err) } // Break connection - err = peerA.Adapter.DisconnectFrom(ctx, peerB.Peer) + err = peerA.Adapter.DisconnectFrom(ctx, peerB.Identity.ID()) if err != nil { t.Fatal(err) } @@ -323,7 +390,7 @@ func TestFetchAfterDisconnect(t *testing.T) { } } - if err := assertBlockListsFrom(peerA.Peer, got, blks); err != nil { + if err := assertBlockListsFrom(peerA.Identity.ID(), got, blks); err != nil { t.Fatal(err) } } @@ -333,7 +400,8 @@ func TestInterestCacheOverflow(t *testing.T) { defer cancel() vnet := getVirtualNetwork() - ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) + router := mockrouting.NewServer() + ig := testinstance.NewTestInstanceGenerator(vnet, router, nil, nil) defer ig.Close() blks := random.BlocksOfSize(2049, blockSize) @@ -382,7 +450,8 @@ func TestPutAfterSessionCacheEvict(t *testing.T) { defer cancel() vnet := getVirtualNetwork() - ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) + router := mockrouting.NewServer() + ig := testinstance.NewTestInstanceGenerator(vnet, router, nil, nil) defer ig.Close() blks := random.BlocksOfSize(2500, blockSize) @@ -419,7 +488,8 @@ func TestMultipleSessions(t *testing.T) { defer cancel() vnet := getVirtualNetwork() - ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) + router := mockrouting.NewServer() + ig := testinstance.NewTestInstanceGenerator(vnet, router, nil, nil) defer ig.Close() blk := random.BlocksOfSize(1, blockSize)[0] @@ -459,7 +529,8 @@ func TestWantlistClearsOnCancel(t *testing.T) { defer cancel() vnet := getVirtualNetwork() - ig := testinstance.NewTestInstanceGenerator(vnet, nil, nil) + router := mockrouting.NewServer() + ig := testinstance.NewTestInstanceGenerator(vnet, router, nil, nil) defer ig.Close() blks := random.BlocksOfSize(10, blockSize) diff --git a/bitswap/client/client.go b/bitswap/client/client.go index 060d6c7f8..71d61fc27 100644 --- a/bitswap/client/client.go +++ b/bitswap/client/client.go @@ -13,7 +13,6 @@ import ( bsmq "github.com/ipfs/boxo/bitswap/client/internal/messagequeue" "github.com/ipfs/boxo/bitswap/client/internal/notifications" bspm "github.com/ipfs/boxo/bitswap/client/internal/peermanager" - bspqm "github.com/ipfs/boxo/bitswap/client/internal/providerquerymanager" bssession "github.com/ipfs/boxo/bitswap/client/internal/session" bssim "github.com/ipfs/boxo/bitswap/client/internal/sessioninterestmanager" bssm "github.com/ipfs/boxo/bitswap/client/internal/sessionmanager" @@ -26,13 +25,12 @@ import ( "github.com/ipfs/boxo/bitswap/tracer" blockstore "github.com/ipfs/boxo/blockstore" exchange "github.com/ipfs/boxo/exchange" + rpqm "github.com/ipfs/boxo/routing/providerquerymanager" blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" delay 
"github.com/ipfs/go-ipfs-delay" logging "github.com/ipfs/go-log/v2" "github.com/ipfs/go-metrics-interface" - process "github.com/jbenet/goprocess" - procctx "github.com/jbenet/goprocess/context" "github.com/libp2p/go-libp2p/core/peer" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -99,6 +97,25 @@ func WithoutDuplicatedBlockStats() Option { } } +// WithDefaultProviderQueryManager indicates whether to use the default +// ProviderQueryManager as a wrapper of the content Router. The default bitswap +// ProviderQueryManager provides bounded parallelism and limits for these +// lookups. The bitswap default ProviderQueryManager uses these options, which +// may be more conservative than the ProviderQueryManager defaults: +// +// - WithMaxInProcessRequests(16) +// - WithMaxProviders(10) +// - WithMaxTimeout(10 *time.Second) +// +// To use a custom ProviderQueryManager, set to false and wrap directly the +// content router provided with the WithContentRouting() option. Only takes +// effect if WithContentRouting is set. +func WithDefaultProviderQueryManager(defaultProviderQueryManager bool) Option { + return func(bs *Client) { + bs.defaultProviderQueryManager = defaultProviderQueryManager + } +} + type BlockReceivedNotifier interface { // ReceivedBlocks notifies the decision engine that a peer is well-behaving // and gave us useful data, potentially increasing its score and making us @@ -106,8 +123,16 @@ type BlockReceivedNotifier interface { ReceivedBlocks(peer.ID, []blocks.Block) } +// ProviderFinder is a subset of +// https://pkg.go.dev/github.com/libp2p/go-libp2p@v0.37.0/core/routing#ContentRouting +type ProviderFinder interface { + FindProvidersAsync(context.Context, cid.Cid, int) <-chan peer.AddrInfo +} + // New initializes a Bitswap client that runs until client.Close is called. -func New(parent context.Context, network bsnet.BitSwapNetwork, bstore blockstore.Blockstore, options ...Option) *Client { +// The Content providerFinder paramteter can be nil to disable content-routing +// lookups for content (rely only on bitswap for discovery). +func New(parent context.Context, network bsnet.BitSwapNetwork, providerFinder ProviderFinder, bstore blockstore.Blockstore, options ...Option) *Client { // important to use provided parent context (since it may include important // loggable data). It's probably not a good idea to allow bitswap to be // coupled to the concerns of the ipfs daemon in this way. @@ -117,15 +142,30 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, bstore blockstore // exclusively. We should probably find another way to share logging data ctx, cancelFunc := context.WithCancel(parent) - px := process.WithTeardown(func() error { - return nil - }) + bs := &Client{ + network: network, + providerFinder: providerFinder, + blockstore: bstore, + cancel: cancelFunc, + closing: make(chan struct{}), + counters: new(counters), + dupMetric: bmetrics.DupHist(ctx), + allMetric: bmetrics.AllHist(ctx), + provSearchDelay: defaults.ProvSearchDelay, + rebroadcastDelay: delay.Fixed(defaults.RebroadcastDelay), + simulateDontHavesOnTimeout: true, + defaultProviderQueryManager: true, + } + + // apply functional options before starting and running bitswap + for _, option := range options { + option(bs) + } // onDontHaveTimeout is called when a want-block is sent to a peer that // has an old version of Bitswap that doesn't support DONT_HAVE messages, // or when no response is received within a timeout. 
var sm *bssm.SessionManager - var bs *Client onDontHaveTimeout := func(p peer.ID, dontHaves []cid.Cid) { // Simulate a message arriving with DONT_HAVEs if bs.simulateDontHavesOnTimeout { @@ -139,7 +179,19 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, bstore blockstore sim := bssim.New() bpm := bsbpm.New() pm := bspm.New(ctx, peerQueueFactory, network.Self()) - pqm := bspqm.New(ctx, network) + + if bs.providerFinder != nil && bs.defaultProviderQueryManager { + // network can do dialing. + pqm, err := rpqm.New(network, bs.providerFinder, + rpqm.WithMaxInProcessRequests(16), + rpqm.WithMaxProviders(10), + rpqm.WithMaxTimeout(10*time.Second)) + if err != nil { + // Should not be possible to hit this + panic(err) + } + bs.pqm = pqm + } sessionFactory := func( sessctx context.Context, @@ -154,7 +206,17 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, bstore blockstore rebroadcastDelay delay.D, self peer.ID, ) bssm.Session { + // Careful when bs.pqm is nil: since we are type-casting it + // into session.ProviderFinder when passing it, the interface + // value would become non-nil. Related: + // https://groups.google.com/g/golang-nuts/c/wnH302gBa4I?pli=1 + var sessionProvFinder bssession.ProviderFinder + if bs.pqm != nil { + sessionProvFinder = bs.pqm + } else if providerFinder != nil { + sessionProvFinder = providerFinder + } - return bssession.New(sessctx, sessmgr, id, spm, pqm, sim, pm, bpm, notif, provSearchDelay, rebroadcastDelay, self) + return bssession.New(sessctx, sessmgr, id, spm, sessionProvFinder, sim, pm, bpm, notif, provSearchDelay, rebroadcastDelay, self) } sessionPeerManagerFactory := func(ctx context.Context, id uint64) bssession.SessionPeerManager { return bsspm.New(id, network.ConnectionManager()) @@ -162,39 +224,10 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, bstore blockstore notif := notifications.New() sm = bssm.New(ctx, sessionFactory, sim, sessionPeerManagerFactory, bpm, pm, notif, network.Self()) - bs = &Client{ - blockstore: bstore, - network: network, - process: px, - pm: pm, - pqm: pqm, - sm: sm, - sim: sim, - notif: notif, - counters: new(counters), - dupMetric: bmetrics.DupHist(ctx), - allMetric: bmetrics.AllHist(ctx), - provSearchDelay: defaults.ProvSearchDelay, - rebroadcastDelay: delay.Fixed(defaults.RebroadcastDelay), - simulateDontHavesOnTimeout: true, - } - - // apply functional options before starting and running bitswap - for _, option := range options { - option(bs) - } - - bs.pqm.Startup() - - // bind the context and process. - // do it over here to avoid closing before all setup is done. 
- go func() { - <-px.Closing() // process closes first - sm.Shutdown() - cancelFunc() - notif.Shutdown() - }() - procctx.CloseAfterContext(px, ctx) // parent cancelled first + bs.sm = sm + bs.notif = notif + bs.pm = pm + bs.sim = sim return bs } @@ -203,8 +236,11 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, bstore blockstore type Client struct { pm *bspm.PeerManager + providerFinder ProviderFinder + // the provider query manager manages requests to find providers - pqm *bspqm.ProviderQueryManager + pqm *rpqm.ProviderQueryManager + defaultProviderQueryManager bool // network delivers messages on behalf of the session network bsnet.BitSwapNetwork @@ -216,7 +252,9 @@ type Client struct { // manages channels of outgoing blocks for sessions notif notifications.PubSub - process process.Process + cancel context.CancelFunc + closing chan struct{} + closeOnce sync.Once // Counters for various statistics counterLk sync.Mutex @@ -301,7 +339,7 @@ func (bs *Client) NotifyNewBlocks(ctx context.Context, blks ...blocks.Block) err defer span.End() select { - case <-bs.process.Closing(): + case <-bs.closing: return errors.New("bitswap is closed") default: } @@ -324,10 +362,10 @@ func (bs *Client) NotifyNewBlocks(ctx context.Context, blks ...blocks.Block) err return nil } -// receiveBlocksFrom process blocks received from the network +// receiveBlocksFrom processes blocks received from the network func (bs *Client) receiveBlocksFrom(ctx context.Context, from peer.ID, blks []blocks.Block, haves []cid.Cid, dontHaves []cid.Cid) error { select { - case <-bs.process.Closing(): + case <-bs.closing: return errors.New("bitswap is closed") default: } @@ -480,7 +518,16 @@ func (bs *Client) ReceiveError(err error) { // Close is called to shutdown the Client func (bs *Client) Close() error { - return bs.process.Close() + bs.closeOnce.Do(func() { + close(bs.closing) + bs.sm.Shutdown() + bs.cancel() + if bs.pqm != nil { + bs.pqm.Close() + } + bs.notif.Shutdown() + }) + return nil } // GetWantlist returns the current local wantlist (both want-blocks and diff --git a/bitswap/client/internal/messagequeue/donthavetimeoutmgr.go b/bitswap/client/internal/messagequeue/donthavetimeoutmgr.go index cdeee68ec..a6180a5d8 100644 --- a/bitswap/client/internal/messagequeue/donthavetimeoutmgr.go +++ b/bitswap/client/internal/messagequeue/donthavetimeoutmgr.go @@ -6,6 +6,7 @@ import ( "time" "github.com/benbjohnson/clock" + "github.com/gammazero/deque" cid "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p/p2p/protocol/ping" ) @@ -79,7 +80,7 @@ type dontHaveTimeoutMgr struct { // wants that are active (waiting for a response or timeout) activeWants map[cid.Cid]*pendingWant // queue of wants, from oldest to newest - wantQueue []*pendingWant + wantQueue deque.Deque[*pendingWant] // time to wait for a response (depends on latency) timeout time.Duration // ewma of message latency (time from message sent to response received) @@ -222,15 +223,15 @@ func (dhtm *dontHaveTimeoutMgr) measurePingLatency() { // checkForTimeouts checks pending wants to see if any are over the timeout. // Note: this function should only be called within the lock. 
func (dhtm *dontHaveTimeoutMgr) checkForTimeouts() { - if len(dhtm.wantQueue) == 0 { + if dhtm.wantQueue.Len() == 0 { return } // Figure out which of the blocks that were wanted were not received // within the timeout expired := make([]cid.Cid, 0, len(dhtm.activeWants)) - for len(dhtm.wantQueue) > 0 { - pw := dhtm.wantQueue[0] + for dhtm.wantQueue.Len() > 0 { + pw := dhtm.wantQueue.Front() // If the want is still active if pw.active { @@ -247,7 +248,7 @@ func (dhtm *dontHaveTimeoutMgr) checkForTimeouts() { } // Remove expired or cancelled wants from the want queue - dhtm.wantQueue = dhtm.wantQueue[1:] + dhtm.wantQueue.PopFront() } // Fire the timeout event for the expired wants @@ -255,7 +256,7 @@ func (dhtm *dontHaveTimeoutMgr) checkForTimeouts() { go dhtm.fireTimeout(expired) } - if len(dhtm.wantQueue) == 0 { + if dhtm.wantQueue.Len() == 0 { return } @@ -266,7 +267,7 @@ func (dhtm *dontHaveTimeoutMgr) checkForTimeouts() { // Schedule the next check for the moment when the oldest pending want will // timeout - oldestStart := dhtm.wantQueue[0].sent + oldestStart := dhtm.wantQueue.Front().sent until := oldestStart.Add(dhtm.timeout).Sub(dhtm.clock.Now()) if dhtm.checkForTimeoutsTimer == nil { dhtm.checkForTimeoutsTimer = dhtm.clock.Timer(until) @@ -313,7 +314,7 @@ func (dhtm *dontHaveTimeoutMgr) AddPending(ks []cid.Cid) { active: true, } dhtm.activeWants[c] = &pw - dhtm.wantQueue = append(dhtm.wantQueue, &pw) + dhtm.wantQueue.PushBack(&pw) } } diff --git a/bitswap/client/internal/messagequeue/messagequeue.go b/bitswap/client/internal/messagequeue/messagequeue.go index fac72f7cd..0b9dc249e 100644 --- a/bitswap/client/internal/messagequeue/messagequeue.go +++ b/bitswap/client/internal/messagequeue/messagequeue.go @@ -24,34 +24,36 @@ var ( ) const ( - defaultRebroadcastInterval = 30 * time.Second - // maxRetries is the number of times to attempt to send a message before - // giving up - maxRetries = 3 - sendTimeout = 30 * time.Second // maxMessageSize is the maximum message size in bytes maxMessageSize = 1024 * 1024 * 2 + // maxPriority is the max priority as defined by the bitswap protocol + maxPriority = math.MaxInt32 + // maxRetries is the number of times to attempt to send a message before + // giving up + maxRetries = 3 + // The maximum amount of time in which to accept a response as being valid + // for latency calculation (as opposed to discarding it as an outlier) + maxValidLatency = 30 * time.Second + // rebroadcastInterval is the minimum amount of time that must elapse before + // resending wants to a peer + rebroadcastInterval = 30 * time.Second // sendErrorBackoff is the time to wait before retrying to connect after // an error when trying to send a message sendErrorBackoff = 100 * time.Millisecond - // maxPriority is the max priority as defined by the bitswap protocol - maxPriority = math.MaxInt32 - // sendMessageDebounce is the debounce duration when calling sendMessage() - sendMessageDebounce = time.Millisecond // when we reach sendMessageCutoff wants/cancels, we'll send the message immediately. sendMessageCutoff = 256 + // sendMessageDebounce is the debounce duration when calling sendMessage() + sendMessageDebounce = time.Millisecond // when we debounce for more than sendMessageMaxDelay, we'll send the // message immediately. 
sendMessageMaxDelay = 20 * time.Millisecond - // The maximum amount of time in which to accept a response as being valid - // for latency calculation (as opposed to discarding it as an outlier) - maxValidLatency = 30 * time.Second + sendTimeout = 30 * time.Second ) // MessageNetwork is any network that can connect peers and generate a message // sender. type MessageNetwork interface { - ConnectTo(context.Context, peer.ID) error + Connect(context.Context, peer.AddrInfo) error NewMessageSender(context.Context, peer.ID, *bsnet.MessageSenderOpts) (bsnet.MessageSender, error) Latency(peer.ID) time.Duration Ping(context.Context, peer.ID) ping.Result @@ -92,10 +94,8 @@ type MessageQueue struct { priority int32 // Dont touch any of these variables outside of run loop - sender bsnet.MessageSender - rebroadcastIntervalLk sync.RWMutex - rebroadcastInterval time.Duration - rebroadcastTimer *clock.Timer + sender bsnet.MessageSender + rebroadcastNow chan struct{} // For performance reasons we just clear out the fields of the message // instead of creating a new one every time. msg bsmsg.BitSwapMessage @@ -175,6 +175,23 @@ func (r *recallWantlist) ClearSentAt(c cid.Cid) { delete(r.sentAt, c) } +// Refresh moves wants that have been sent for longer than the given interval +// from the sent list back to the pending list. It returns the number of wants +// that were refreshed. +func (r *recallWantlist) Refresh(now time.Time, interval time.Duration) int { + var refreshed int + for _, want := range r.sent.Entries() { + sentAt, ok := r.sentAt[want.Cid] + if ok && now.Sub(sentAt) >= interval { + r.pending.Add(want.Cid, want.Priority, want.WantType) + r.sent.Remove(want.Cid) + refreshed++ + } + } + + return refreshed +} + type peerConn struct { p peer.ID network MessageNetwork @@ -246,21 +263,21 @@ func newMessageQueue( ) *MessageQueue { ctx, cancel := context.WithCancel(ctx) return &MessageQueue{ - ctx: ctx, - shutdown: cancel, - p: p, - network: network, - dhTimeoutMgr: dhTimeoutMgr, - maxMessageSize: maxMsgSize, - bcstWants: newRecallWantList(), - peerWants: newRecallWantList(), - cancels: cid.NewSet(), - outgoingWork: make(chan time.Time, 1), - responses: make(chan []cid.Cid, 8), - rebroadcastInterval: defaultRebroadcastInterval, - sendErrorBackoff: sendErrorBackoff, - maxValidLatency: maxValidLatency, - priority: maxPriority, + ctx: ctx, + shutdown: cancel, + p: p, + network: network, + dhTimeoutMgr: dhTimeoutMgr, + maxMessageSize: maxMsgSize, + bcstWants: newRecallWantList(), + peerWants: newRecallWantList(), + cancels: cid.NewSet(), + outgoingWork: make(chan time.Time, 1), + responses: make(chan []cid.Cid, 8), + rebroadcastNow: make(chan struct{}), + sendErrorBackoff: sendErrorBackoff, + maxValidLatency: maxValidLatency, + priority: maxPriority, // For performance reasons we just clear out the fields of the message // after using it, instead of creating a new one every time. 
msg: bsmsg.New(false), @@ -377,21 +394,15 @@ func (mq *MessageQueue) ResponseReceived(ks []cid.Cid) { } } -// SetRebroadcastInterval sets a new interval on which to rebroadcast the full wantlist -func (mq *MessageQueue) SetRebroadcastInterval(delay time.Duration) { - mq.rebroadcastIntervalLk.Lock() - mq.rebroadcastInterval = delay - if mq.rebroadcastTimer != nil { - mq.rebroadcastTimer.Reset(delay) +func (mq *MessageQueue) RebroadcastNow() { + select { + case mq.rebroadcastNow <- struct{}{}: + case <-mq.ctx.Done(): } - mq.rebroadcastIntervalLk.Unlock() } // Startup starts the processing of messages and rebroadcasting. func (mq *MessageQueue) Startup() { - mq.rebroadcastIntervalLk.RLock() - mq.rebroadcastTimer = mq.clock.Timer(mq.rebroadcastInterval) - mq.rebroadcastIntervalLk.RUnlock() go mq.runQueue() } @@ -411,6 +422,8 @@ func (mq *MessageQueue) onShutdown() { } func (mq *MessageQueue) runQueue() { + const runRebroadcastsInterval = rebroadcastInterval / 2 + defer mq.onShutdown() // Create a timer for debouncing scheduled work. @@ -421,11 +434,18 @@ func (mq *MessageQueue) runQueue() { <-scheduleWork.C } + rebroadcastTimer := mq.clock.Timer(runRebroadcastsInterval) + defer rebroadcastTimer.Stop() + var workScheduled time.Time - for mq.ctx.Err() == nil { + for { select { - case <-mq.rebroadcastTimer.C: - mq.rebroadcastWantlist() + case now := <-rebroadcastTimer.C: + mq.rebroadcastWantlist(now, rebroadcastInterval) + rebroadcastTimer.Reset(runRebroadcastsInterval) + + case <-mq.rebroadcastNow: + mq.rebroadcastWantlist(mq.clock.Now(), 0) case when := <-mq.outgoingWork: // If we have work scheduled, cancel the timer. If we @@ -470,35 +490,20 @@ func (mq *MessageQueue) runQueue() { } // Periodically resend the list of wants to the peer -func (mq *MessageQueue) rebroadcastWantlist() { - mq.rebroadcastIntervalLk.RLock() - mq.rebroadcastTimer.Reset(mq.rebroadcastInterval) - mq.rebroadcastIntervalLk.RUnlock() +func (mq *MessageQueue) rebroadcastWantlist(now time.Time, interval time.Duration) { + mq.wllock.Lock() + // Transfer wants from the rebroadcast lists into the pending lists. + toRebroadcast := mq.bcstWants.Refresh(now, interval) + mq.peerWants.Refresh(now, interval) + mq.wllock.Unlock() // If some wants were transferred from the rebroadcast list - if mq.transferRebroadcastWants() { + if toRebroadcast > 0 { // Send them out mq.sendMessage() + log.Infow("Rebroadcasting wants", "amount", toRebroadcast, "peer", mq.p) } } -// Transfer wants from the rebroadcast lists into the pending lists. 
-func (mq *MessageQueue) transferRebroadcastWants() bool { - mq.wllock.Lock() - defer mq.wllock.Unlock() - - // Check if there are any wants to rebroadcast - if mq.bcstWants.sent.Len() == 0 && mq.peerWants.sent.Len() == 0 { - return false - } - - // Copy sent wants into pending wants lists - mq.bcstWants.pending.Absorb(mq.bcstWants.sent) - mq.peerWants.pending.Absorb(mq.peerWants.sent) - - return true -} - func (mq *MessageQueue) signalWorkReady() { select { case mq.outgoingWork <- mq.clock.Now(): diff --git a/bitswap/client/internal/messagequeue/messagequeue_test.go b/bitswap/client/internal/messagequeue/messagequeue_test.go index 3a9c21309..dee2aa58c 100644 --- a/bitswap/client/internal/messagequeue/messagequeue_test.go +++ b/bitswap/client/internal/messagequeue/messagequeue_test.go @@ -27,7 +27,7 @@ type fakeMessageNetwork struct { messageSender bsnet.MessageSender } -func (fmn *fakeMessageNetwork) ConnectTo(context.Context, peer.ID) error { +func (fmn *fakeMessageNetwork) Connect(context.Context, peer.AddrInfo) error { return fmn.connectError } @@ -432,9 +432,7 @@ func TestWantlistRebroadcast(t *testing.T) { t.Fatal("wrong number of wants") } - // Tell message queue to rebroadcast after 5ms, then wait 8ms - messageQueue.SetRebroadcastInterval(5 * time.Millisecond) - clock.Add(8 * time.Millisecond) + messageQueue.RebroadcastNow() message = <-messagesSent expectEvent(t, events, messageFinishedSending) @@ -443,10 +441,7 @@ func TestWantlistRebroadcast(t *testing.T) { t.Fatal("did not rebroadcast all wants") } - // Tell message queue to rebroadcast after a long time (so it doesn't - // interfere with the next message collection), then send out some - // regular wants and collect them - messageQueue.SetRebroadcastInterval(1 * time.Second) + // Send out some regular wants and collect them messageQueue.AddWants(wantBlocks, wantHaves) expectEvent(t, events, messageQueued) clock.Add(10 * time.Millisecond) @@ -464,9 +459,7 @@ func TestWantlistRebroadcast(t *testing.T) { default: } - // Tell message queue to rebroadcast after 10ms, then wait 15ms - messageQueue.SetRebroadcastInterval(10 * time.Millisecond) - clock.Add(15 * time.Millisecond) + messageQueue.RebroadcastNow() message = <-messagesSent expectEvent(t, events, messageFinishedSending) @@ -477,7 +470,6 @@ func TestWantlistRebroadcast(t *testing.T) { } // Cancel some of the wants - messageQueue.SetRebroadcastInterval(1 * time.Second) cancels := append([]cid.Cid{bcstwh[0]}, wantHaves[0], wantBlocks[0]) messageQueue.AddCancels(cancels) expectEvent(t, events, messageQueued) @@ -501,9 +493,7 @@ func TestWantlistRebroadcast(t *testing.T) { } } - // Tell message queue to rebroadcast after 10ms, then wait 15ms - messageQueue.SetRebroadcastInterval(10 * time.Millisecond) - clock.Add(15 * time.Millisecond) + messageQueue.RebroadcastNow() message = <-messagesSent expectEvent(t, events, messageFinishedSending) diff --git a/bitswap/client/internal/notifications/notifications.go b/bitswap/client/internal/notifications/notifications.go index 499a61c42..cd0816161 100644 --- a/bitswap/client/internal/notifications/notifications.go +++ b/bitswap/client/internal/notifications/notifications.go @@ -69,12 +69,13 @@ func (ps *impl) Shutdown() { // corresponding to |keys|. 
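// An empty key set yields a channel that is already closed. Illustrative
// use (the process helper is hypothetical):
//
//	for blk := range ps.Subscribe(ctx, k1, k2) {
//		process(blk)
//	}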
func (ps *impl) Subscribe(ctx context.Context, keys ...cid.Cid) <-chan blocks.Block { blocksCh := make(chan blocks.Block, len(keys)) - valuesCh := make(chan interface{}, len(keys)) // provide our own channel to control buffer, prevent blocking if len(keys) == 0 { close(blocksCh) return blocksCh } + valuesCh := make(chan interface{}, len(keys)) // provide our own channel to control buffer, prevent blocking + // prevent shutdown ps.lk.RLock() defer ps.lk.RUnlock() diff --git a/bitswap/client/internal/peermanager/peermanager.go b/bitswap/client/internal/peermanager/peermanager.go index 25cdd605f..76c636062 100644 --- a/bitswap/client/internal/peermanager/peermanager.go +++ b/bitswap/client/internal/peermanager/peermanager.go @@ -42,7 +42,7 @@ type PeerManager struct { createPeerQueue PeerQueueFactory ctx context.Context - psLk sync.RWMutex + psLk sync.Mutex sessions map[uint64]Session peerSessions map[peer.ID]map[uint64]struct{} @@ -143,13 +143,15 @@ func (pm *PeerManager) BroadcastWantHaves(ctx context.Context, wantHaves []cid.C // SendWants sends the given want-blocks and want-haves to the given peer. // It filters out wants that have previously been sent to the peer. -func (pm *PeerManager) SendWants(ctx context.Context, p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) { +func (pm *PeerManager) SendWants(ctx context.Context, p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) bool { pm.pqLk.Lock() defer pm.pqLk.Unlock() - if _, ok := pm.peerQueues[p]; ok { - pm.pwm.sendWants(p, wantBlocks, wantHaves) + if _, ok := pm.peerQueues[p]; !ok { + return false } + pm.pwm.sendWants(p, wantBlocks, wantHaves) + return true } // SendCancels sends cancels for the given keys to all peers who had previously diff --git a/bitswap/client/internal/peermanager/peerwantmanager.go b/bitswap/client/internal/peermanager/peerwantmanager.go index 0bc4732ca..e9fdfbb46 100644 --- a/bitswap/client/internal/peermanager/peerwantmanager.go +++ b/bitswap/client/internal/peermanager/peerwantmanager.go @@ -1,8 +1,8 @@ package peermanager import ( - "bytes" "fmt" + "strings" cid "github.com/ipfs/go-cid" peer "github.com/libp2p/go-libp2p/core/peer" @@ -158,8 +158,6 @@ func (pwm *peerWantManager) broadcastWantHaves(wantHaves []cid.Cid) { // sendWants only sends the peer the want-blocks and want-haves that have not // already been sent to it. 
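// The filtered want slices are allocated only after the peer lookup
// succeeds, so calls for unknown peers allocate nothing.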
func (pwm *peerWantManager) sendWants(p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) { - fltWantBlks := make([]cid.Cid, 0, len(wantBlocks)) - fltWantHvs := make([]cid.Cid, 0, len(wantHaves)) // Get the existing want-blocks and want-haves for the peer pws, ok := pwm.peerWants[p] @@ -169,6 +167,8 @@ func (pwm *peerWantManager) sendWants(p peer.ID, wantBlocks []cid.Cid, wantHaves return } + fltWantBlks := make([]cid.Cid, 0, len(wantBlocks)) + // Iterate over the requested want-blocks for _, c := range wantBlocks { // If the want-block hasn't been sent to the peer @@ -198,6 +198,8 @@ func (pwm *peerWantManager) sendWants(p peer.ID, wantBlocks []cid.Cid, wantHaves pwm.reverseIndexAdd(c, p) } + fltWantHvs := make([]cid.Cid, 0, len(wantHaves)) + // Iterate over the requested want-haves for _, c := range wantHaves { // If we've already broadcasted this want, don't bother with a @@ -450,7 +452,7 @@ func (pwm *peerWantManager) getWants() []cid.Cid { } func (pwm *peerWantManager) String() string { - var b bytes.Buffer + var b strings.Builder for p, ws := range pwm.peerWants { b.WriteString(fmt.Sprintf("Peer %s: %d want-have / %d want-block:\n", p, ws.wantHaves.Len(), ws.wantBlocks.Len())) for _, c := range ws.wantHaves.Keys() { diff --git a/bitswap/client/internal/providerquerymanager/providerquerymanager.go b/bitswap/client/internal/providerquerymanager/providerquerymanager.go deleted file mode 100644 index ea10a40e5..000000000 --- a/bitswap/client/internal/providerquerymanager/providerquerymanager.go +++ /dev/null @@ -1,455 +0,0 @@ -package providerquerymanager - -import ( - "context" - "sync" - "time" - - "github.com/ipfs/boxo/bitswap/client/internal" - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - peer "github.com/libp2p/go-libp2p/core/peer" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -var log = logging.Logger("bitswap/client/provqrymgr") - -const ( - maxProviders = 10 - maxInProcessRequests = 6 - defaultTimeout = 10 * time.Second -) - -type inProgressRequestStatus struct { - ctx context.Context - cancelFn func() - providersSoFar []peer.ID - listeners map[chan peer.ID]struct{} -} - -type findProviderRequest struct { - k cid.Cid - ctx context.Context -} - -// ProviderQueryNetwork is an interface for finding providers and connecting to -// peers. -type ProviderQueryNetwork interface { - ConnectTo(context.Context, peer.ID) error - FindProvidersAsync(context.Context, cid.Cid, int) <-chan peer.ID -} - -type providerQueryMessage interface { - debugMessage() - handle(pqm *ProviderQueryManager) -} - -type receivedProviderMessage struct { - ctx context.Context - k cid.Cid - p peer.ID -} - -type finishedProviderQueryMessage struct { - ctx context.Context - k cid.Cid -} - -type newProvideQueryMessage struct { - ctx context.Context - k cid.Cid - inProgressRequestChan chan<- inProgressRequest -} - -type cancelRequestMessage struct { - ctx context.Context - incomingProviders chan peer.ID - k cid.Cid -} - -// ProviderQueryManager manages requests to find more providers for blocks -// for bitswap sessions. 
It's main goals are to: -// - rate limit requests -- don't have too many find provider calls running -// simultaneously -// - connect to found peers and filter them if it can't connect -// - ensure two findprovider calls for the same block don't run concurrently -// - manage timeouts -type ProviderQueryManager struct { - ctx context.Context - network ProviderQueryNetwork - providerQueryMessages chan providerQueryMessage - providerRequestsProcessing chan *findProviderRequest - incomingFindProviderRequests chan *findProviderRequest - - findProviderTimeout time.Duration - timeoutMutex sync.RWMutex - - // do not touch outside the run loop - inProgressRequestStatuses map[cid.Cid]*inProgressRequestStatus -} - -// New initializes a new ProviderQueryManager for a given context and a given -// network provider. -func New(ctx context.Context, network ProviderQueryNetwork) *ProviderQueryManager { - return &ProviderQueryManager{ - ctx: ctx, - network: network, - providerQueryMessages: make(chan providerQueryMessage, 16), - providerRequestsProcessing: make(chan *findProviderRequest), - incomingFindProviderRequests: make(chan *findProviderRequest), - inProgressRequestStatuses: make(map[cid.Cid]*inProgressRequestStatus), - findProviderTimeout: defaultTimeout, - } -} - -// Startup starts processing for the ProviderQueryManager. -func (pqm *ProviderQueryManager) Startup() { - go pqm.run() -} - -type inProgressRequest struct { - providersSoFar []peer.ID - incoming chan peer.ID -} - -// SetFindProviderTimeout changes the timeout for finding providers -func (pqm *ProviderQueryManager) SetFindProviderTimeout(findProviderTimeout time.Duration) { - pqm.timeoutMutex.Lock() - pqm.findProviderTimeout = findProviderTimeout - pqm.timeoutMutex.Unlock() -} - -// FindProvidersAsync finds providers for the given block. -func (pqm *ProviderQueryManager) FindProvidersAsync(sessionCtx context.Context, k cid.Cid) <-chan peer.ID { - inProgressRequestChan := make(chan inProgressRequest) - - var span trace.Span - sessionCtx, span = internal.StartSpan(sessionCtx, "ProviderQueryManager.FindProvidersAsync", trace.WithAttributes(attribute.Stringer("cid", k))) - defer span.End() - - select { - case pqm.providerQueryMessages <- &newProvideQueryMessage{ - ctx: sessionCtx, - k: k, - inProgressRequestChan: inProgressRequestChan, - }: - case <-pqm.ctx.Done(): - ch := make(chan peer.ID) - close(ch) - return ch - case <-sessionCtx.Done(): - ch := make(chan peer.ID) - close(ch) - return ch - } - - // DO NOT select on sessionCtx. We only want to abort here if we're - // shutting down because we can't actually _cancel_ the request till we - // get to receiveProviders. 
- var receivedInProgressRequest inProgressRequest - select { - case <-pqm.ctx.Done(): - ch := make(chan peer.ID) - close(ch) - return ch - case receivedInProgressRequest = <-inProgressRequestChan: - } - - return pqm.receiveProviders(sessionCtx, k, receivedInProgressRequest) -} - -func (pqm *ProviderQueryManager) receiveProviders(sessionCtx context.Context, k cid.Cid, receivedInProgressRequest inProgressRequest) <-chan peer.ID { - // maintains an unbuffered queue for incoming providers for given request for a given session - // essentially, as a provider comes in, for a given CID, we want to immediately broadcast to all - // sessions that queried that CID, without worrying about whether the client code is actually - // reading from the returned channel -- so that the broadcast never blocks - // based on: https://medium.com/capital-one-tech/building-an-unbounded-channel-in-go-789e175cd2cd - returnedProviders := make(chan peer.ID) - receivedProviders := append([]peer.ID(nil), receivedInProgressRequest.providersSoFar[0:]...) - incomingProviders := receivedInProgressRequest.incoming - - go func() { - defer close(returnedProviders) - outgoingProviders := func() chan<- peer.ID { - if len(receivedProviders) == 0 { - return nil - } - return returnedProviders - } - nextProvider := func() peer.ID { - if len(receivedProviders) == 0 { - return "" - } - return receivedProviders[0] - } - for len(receivedProviders) > 0 || incomingProviders != nil { - select { - case <-pqm.ctx.Done(): - return - case <-sessionCtx.Done(): - if incomingProviders != nil { - pqm.cancelProviderRequest(sessionCtx, k, incomingProviders) - } - return - case provider, ok := <-incomingProviders: - if !ok { - incomingProviders = nil - } else { - receivedProviders = append(receivedProviders, provider) - } - case outgoingProviders() <- nextProvider(): - receivedProviders = receivedProviders[1:] - } - } - }() - return returnedProviders -} - -func (pqm *ProviderQueryManager) cancelProviderRequest(ctx context.Context, k cid.Cid, incomingProviders chan peer.ID) { - cancelMessageChannel := pqm.providerQueryMessages - for { - select { - case cancelMessageChannel <- &cancelRequestMessage{ - ctx: ctx, - incomingProviders: incomingProviders, - k: k, - }: - cancelMessageChannel = nil - // clear out any remaining providers, in case and "incoming provider" - // messages get processed before our cancel message - case _, ok := <-incomingProviders: - if !ok { - return - } - case <-pqm.ctx.Done(): - return - } - } -} - -func (pqm *ProviderQueryManager) findProviderWorker() { - // findProviderWorker just cycles through incoming provider queries one - // at a time. 
We have six of these workers running at once - // to let requests go in parallel but keep them rate limited - for { - select { - case fpr, ok := <-pqm.providerRequestsProcessing: - if !ok { - return - } - k := fpr.k - log.Debugf("Beginning Find Provider Request for cid: %s", k.String()) - pqm.timeoutMutex.RLock() - findProviderCtx, cancel := context.WithTimeout(fpr.ctx, pqm.findProviderTimeout) - pqm.timeoutMutex.RUnlock() - span := trace.SpanFromContext(findProviderCtx) - span.AddEvent("StartFindProvidersAsync") - providers := pqm.network.FindProvidersAsync(findProviderCtx, k, maxProviders) - wg := &sync.WaitGroup{} - for p := range providers { - wg.Add(1) - go func(p peer.ID) { - defer wg.Done() - span.AddEvent("FoundProvider", trace.WithAttributes(attribute.Stringer("peer", p))) - err := pqm.network.ConnectTo(findProviderCtx, p) - if err != nil { - span.RecordError(err, trace.WithAttributes(attribute.Stringer("peer", p))) - log.Debugf("failed to connect to provider %s: %s", p, err) - return - } - span.AddEvent("ConnectedToProvider", trace.WithAttributes(attribute.Stringer("peer", p))) - select { - case pqm.providerQueryMessages <- &receivedProviderMessage{ - ctx: findProviderCtx, - k: k, - p: p, - }: - case <-pqm.ctx.Done(): - return - } - }(p) - } - wg.Wait() - cancel() - select { - case pqm.providerQueryMessages <- &finishedProviderQueryMessage{ - ctx: findProviderCtx, - k: k, - }: - case <-pqm.ctx.Done(): - } - case <-pqm.ctx.Done(): - return - } - } -} - -func (pqm *ProviderQueryManager) providerRequestBufferWorker() { - // the provider request buffer worker just maintains an unbounded - // buffer for incoming provider queries and dispatches to the find - // provider workers as they become available - // based on: https://medium.com/capital-one-tech/building-an-unbounded-channel-in-go-789e175cd2cd - var providerQueryRequestBuffer []*findProviderRequest - nextProviderQuery := func() *findProviderRequest { - if len(providerQueryRequestBuffer) == 0 { - return nil - } - return providerQueryRequestBuffer[0] - } - outgoingRequests := func() chan<- *findProviderRequest { - if len(providerQueryRequestBuffer) == 0 { - return nil - } - return pqm.providerRequestsProcessing - } - - for { - select { - case incomingRequest, ok := <-pqm.incomingFindProviderRequests: - if !ok { - return - } - providerQueryRequestBuffer = append(providerQueryRequestBuffer, incomingRequest) - case outgoingRequests() <- nextProviderQuery(): - providerQueryRequestBuffer = providerQueryRequestBuffer[1:] - case <-pqm.ctx.Done(): - return - } - } -} - -func (pqm *ProviderQueryManager) cleanupInProcessRequests() { - for _, requestStatus := range pqm.inProgressRequestStatuses { - for listener := range requestStatus.listeners { - close(listener) - } - requestStatus.cancelFn() - } -} - -func (pqm *ProviderQueryManager) run() { - defer pqm.cleanupInProcessRequests() - - go pqm.providerRequestBufferWorker() - for i := 0; i < maxInProcessRequests; i++ { - go pqm.findProviderWorker() - } - - for { - select { - case nextMessage := <-pqm.providerQueryMessages: - nextMessage.debugMessage() - nextMessage.handle(pqm) - case <-pqm.ctx.Done(): - return - } - } -} - -func (rpm *receivedProviderMessage) debugMessage() { - log.Debugf("Received provider (%s) (%s)", rpm.p, rpm.k) - trace.SpanFromContext(rpm.ctx).AddEvent("ReceivedProvider", trace.WithAttributes(attribute.Stringer("provider", rpm.p), attribute.Stringer("cid", rpm.k))) -} - -func (rpm *receivedProviderMessage) handle(pqm *ProviderQueryManager) { - requestStatus, ok := 
pqm.inProgressRequestStatuses[rpm.k] - if !ok { - log.Errorf("Received provider (%s) for cid (%s) not requested", rpm.p.String(), rpm.k.String()) - return - } - requestStatus.providersSoFar = append(requestStatus.providersSoFar, rpm.p) - for listener := range requestStatus.listeners { - select { - case listener <- rpm.p: - case <-pqm.ctx.Done(): - return - } - } -} - -func (fpqm *finishedProviderQueryMessage) debugMessage() { - log.Debugf("Finished Provider Query on cid: %s", fpqm.k) - trace.SpanFromContext(fpqm.ctx).AddEvent("FinishedProviderQuery", trace.WithAttributes(attribute.Stringer("cid", fpqm.k))) -} - -func (fpqm *finishedProviderQueryMessage) handle(pqm *ProviderQueryManager) { - requestStatus, ok := pqm.inProgressRequestStatuses[fpqm.k] - if !ok { - // we canceled the request as it finished. - return - } - for listener := range requestStatus.listeners { - close(listener) - } - delete(pqm.inProgressRequestStatuses, fpqm.k) - requestStatus.cancelFn() -} - -func (npqm *newProvideQueryMessage) debugMessage() { - log.Debugf("New Provider Query on cid: %s", npqm.k) - trace.SpanFromContext(npqm.ctx).AddEvent("NewProvideQuery", trace.WithAttributes(attribute.Stringer("cid", npqm.k))) -} - -func (npqm *newProvideQueryMessage) handle(pqm *ProviderQueryManager) { - requestStatus, ok := pqm.inProgressRequestStatuses[npqm.k] - if !ok { - - ctx, cancelFn := context.WithCancel(pqm.ctx) - span := trace.SpanFromContext(npqm.ctx) - span.AddEvent("NewQuery", trace.WithAttributes(attribute.Stringer("cid", npqm.k))) - ctx = trace.ContextWithSpan(ctx, span) - - requestStatus = &inProgressRequestStatus{ - listeners: make(map[chan peer.ID]struct{}), - ctx: ctx, - cancelFn: cancelFn, - } - - pqm.inProgressRequestStatuses[npqm.k] = requestStatus - - select { - case pqm.incomingFindProviderRequests <- &findProviderRequest{ - k: npqm.k, - ctx: ctx, - }: - case <-pqm.ctx.Done(): - return - } - } else { - trace.SpanFromContext(npqm.ctx).AddEvent("JoinQuery", trace.WithAttributes(attribute.Stringer("cid", npqm.k))) - } - inProgressChan := make(chan peer.ID) - requestStatus.listeners[inProgressChan] = struct{}{} - select { - case npqm.inProgressRequestChan <- inProgressRequest{ - providersSoFar: requestStatus.providersSoFar, - incoming: inProgressChan, - }: - case <-pqm.ctx.Done(): - } -} - -func (crm *cancelRequestMessage) debugMessage() { - log.Debugf("Cancel provider query on cid: %s", crm.k) - trace.SpanFromContext(crm.ctx).AddEvent("CancelRequest", trace.WithAttributes(attribute.Stringer("cid", crm.k))) -} - -func (crm *cancelRequestMessage) handle(pqm *ProviderQueryManager) { - requestStatus, ok := pqm.inProgressRequestStatuses[crm.k] - if !ok { - // Request finished while queued. - return - } - _, ok = requestStatus.listeners[crm.incomingProviders] - if !ok { - // Request finished and _restarted_ while queued. 
- return - } - delete(requestStatus.listeners, crm.incomingProviders) - close(crm.incomingProviders) - if len(requestStatus.listeners) == 0 { - delete(pqm.inProgressRequestStatuses, crm.k) - requestStatus.cancelFn() - } -} diff --git a/bitswap/client/internal/session/cidqueue.go b/bitswap/client/internal/session/cidqueue.go index aedfa944c..2ecd0f672 100644 --- a/bitswap/client/internal/session/cidqueue.go +++ b/bitswap/client/internal/session/cidqueue.go @@ -1,9 +1,12 @@ package session -import cid "github.com/ipfs/go-cid" +import ( + "github.com/gammazero/deque" + cid "github.com/ipfs/go-cid" +) type cidQueue struct { - elems []cid.Cid + elems deque.Deque[cid.Cid] eset *cid.Set } @@ -13,12 +16,11 @@ func newCidQueue() *cidQueue { func (cq *cidQueue) Pop() cid.Cid { for { - if len(cq.elems) == 0 { + if cq.elems.Len() == 0 { return cid.Cid{} } - out := cq.elems[0] - cq.elems = cq.elems[1:] + out := cq.elems.PopFront() if cq.eset.Has(out) { cq.eset.Remove(out) @@ -29,24 +31,30 @@ func (cq *cidQueue) Pop() cid.Cid { func (cq *cidQueue) Cids() []cid.Cid { // Lazily delete from the list any cids that were removed from the set - if len(cq.elems) > cq.eset.Len() { - i := 0 - for _, c := range cq.elems { + if cq.elems.Len() > cq.eset.Len() { + for i := 0; i < cq.elems.Len(); i++ { + c := cq.elems.PopFront() if cq.eset.Has(c) { - cq.elems[i] = c - i++ + cq.elems.PushBack(c) } } - cq.elems = cq.elems[:i] + } + + if cq.elems.Len() == 0 { + return nil } // Make a copy of the cids - return append([]cid.Cid{}, cq.elems...) + cids := make([]cid.Cid, cq.elems.Len()) + for i := 0; i < cq.elems.Len(); i++ { + cids[i] = cq.elems.At(i) + } + return cids } func (cq *cidQueue) Push(c cid.Cid) { if cq.eset.Visit(c) { - cq.elems = append(cq.elems, c) + cq.elems.PushBack(c) } } diff --git a/bitswap/client/internal/session/peerresponsetracker.go b/bitswap/client/internal/session/peerresponsetracker.go index 25c10fe87..f96424742 100644 --- a/bitswap/client/internal/session/peerresponsetracker.go +++ b/bitswap/client/internal/session/peerresponsetracker.go @@ -31,8 +31,6 @@ func (prt *peerResponseTracker) choose(peers []peer.ID) peer.ID { return "" } - rnd := rand.Float64() - // Find the total received blocks for all candidate peers total := 0 for _, p := range peers { @@ -41,6 +39,7 @@ func (prt *peerResponseTracker) choose(peers []peer.ID) peer.ID { // Choose one of the peers with a chance proportional to the number // of blocks received from that peer + rnd := rand.Float64() counted := 0.0 for _, p := range peers { counted += float64(prt.getPeerCount(p)) / float64(total) @@ -52,8 +51,7 @@ func (prt *peerResponseTracker) choose(peers []peer.ID) peer.ID { // We shouldn't get here unless there is some weirdness with floating point // math that doesn't quite cover the whole range of peers in the for loop // so just choose the last peer. 
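	// (the cumulative probability sum can land just below rnd due to
	// floating point rounding, even after every peer has been counted)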
- index := len(peers) - 1 - return peers[index] + return peers[len(peers)-1] } // getPeerCount returns the number of times the peer was first to send us a diff --git a/bitswap/client/internal/session/session.go b/bitswap/client/internal/session/session.go index 6f99dec0e..f11d7d83e 100644 --- a/bitswap/client/internal/session/session.go +++ b/bitswap/client/internal/session/session.go @@ -38,7 +38,7 @@ type PeerManager interface { // interested in a peer's connection state UnregisterSession(uint64) // SendWants tells the PeerManager to send wants to the given peer - SendWants(ctx context.Context, peerId peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) + SendWants(ctx context.Context, peerId peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) bool // BroadcastWantHaves sends want-haves to all connected peers (used for // session discovery) BroadcastWantHaves(context.Context, []cid.Cid) @@ -75,7 +75,7 @@ type SessionPeerManager interface { // ProviderFinder is used to find providers for a given key type ProviderFinder interface { // FindProvidersAsync searches for peers that provide the given CID - FindProvidersAsync(ctx context.Context, k cid.Cid) <-chan peer.ID + FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.AddrInfo } // opType is the kind of operation that is being processed by the event loop @@ -403,14 +403,18 @@ func (s *Session) handlePeriodicSearch(ctx context.Context) { // findMorePeers attempts to find more peers for a session by searching for // providers for the given Cid func (s *Session) findMorePeers(ctx context.Context, c cid.Cid) { + // noop when provider finder is disabled + if s.providerFinder == nil { + return + } go func(k cid.Cid) { ctx, span := internal.StartSpan(ctx, "Session.FindMorePeers") defer span.End() - for p := range s.providerFinder.FindProvidersAsync(ctx, k) { + for p := range s.providerFinder.FindProvidersAsync(ctx, k, 0) { // When a provider indicates that it has a cid, it's equivalent to // the providing peer sending a HAVE span.AddEvent("FoundPeer") - s.sws.Update(p, nil, []cid.Cid{c}, nil) + s.sws.Update(p.ID, nil, []cid.Cid{c}, nil) } }(c) } diff --git a/bitswap/client/internal/session/session_test.go b/bitswap/client/internal/session/session_test.go index a14fdffd0..c0d26a91d 100644 --- a/bitswap/client/internal/session/session_test.go +++ b/bitswap/client/internal/session/session_test.go @@ -12,7 +12,6 @@ import ( bspm "github.com/ipfs/boxo/bitswap/client/internal/peermanager" bssim "github.com/ipfs/boxo/bitswap/client/internal/sessioninterestmanager" bsspm "github.com/ipfs/boxo/bitswap/client/internal/sessionpeermanager" - "github.com/ipfs/boxo/internal/test" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" delay "github.com/ipfs/go-ipfs-delay" @@ -116,7 +115,7 @@ func newFakeProviderFinder() *fakeProviderFinder { } } -func (fpf *fakeProviderFinder) FindProvidersAsync(ctx context.Context, k cid.Cid) <-chan peer.ID { +func (fpf *fakeProviderFinder) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.AddrInfo { go func() { select { case fpf.findMorePeersRequested <- k: @@ -124,7 +123,7 @@ func (fpf *fakeProviderFinder) FindProvidersAsync(ctx context.Context, k cid.Cid } }() - return make(chan peer.ID) + return make(chan peer.AddrInfo) } type wantReq struct { @@ -141,9 +140,11 @@ func newFakePeerManager() *fakePeerManager { } } -func (pm *fakePeerManager) RegisterSession(peer.ID, bspm.Session) {} -func (pm *fakePeerManager) UnregisterSession(uint64) {} -func (pm *fakePeerManager) 
SendWants(context.Context, peer.ID, []cid.Cid, []cid.Cid) {} +func (pm *fakePeerManager) RegisterSession(peer.ID, bspm.Session) {} +func (pm *fakePeerManager) UnregisterSession(uint64) {} +func (pm *fakePeerManager) SendWants(context.Context, peer.ID, []cid.Cid, []cid.Cid) bool { + return true +} func (pm *fakePeerManager) BroadcastWantHaves(ctx context.Context, cids []cid.Cid) { select { case pm.wantReqs <- wantReq{cids}: @@ -153,8 +154,6 @@ func (pm *fakePeerManager) BroadcastWantHaves(ctx context.Context, cids []cid.Ci func (pm *fakePeerManager) SendCancels(ctx context.Context, cancels []cid.Cid) {} func TestSessionGetBlocks(t *testing.T) { - test.Flaky(t) - ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) fpm := newFakePeerManager() fspm := newFakeSessionPeerManager() @@ -173,9 +172,7 @@ func TestSessionGetBlocks(t *testing.T) { } _, err := session.GetBlocks(ctx, cids) - if err != nil { - t.Fatal("error getting blocks") - } + require.NoError(t, err, "error getting blocks") // Wait for initial want request receivedWantReq := <-fpm.wantReqs @@ -343,8 +340,6 @@ func TestSessionOnPeersExhausted(t *testing.T) { } func TestSessionFailingToGetFirstBlock(t *testing.T) { - test.Flaky(t) - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() fpm := newFakePeerManager() @@ -362,7 +357,6 @@ func TestSessionFailingToGetFirstBlock(t *testing.T) { for _, block := range blks { cids = append(cids, block.Cid()) } - startTick := time.Now() _, err := session.GetBlocks(ctx, cids) require.NoError(t, err, "error getting blocks") @@ -394,7 +388,6 @@ func TestSessionFailingToGetFirstBlock(t *testing.T) { case <-ctx.Done(): t.Fatal("Did not find more peers") } - firstTickLength := time.Since(startTick) // Wait for another broadcast to occur select { @@ -407,7 +400,6 @@ func TestSessionFailingToGetFirstBlock(t *testing.T) { } // Wait for another broadcast to occur - startTick = time.Now() select { case receivedWantReq := <-fpm.wantReqs: if len(receivedWantReq.cids) < len(cids) { @@ -417,14 +409,7 @@ func TestSessionFailingToGetFirstBlock(t *testing.T) { t.Fatal("Never rebroadcast want list") } - // Tick should take longer - consecutiveTickLength := time.Since(startTick) - if firstTickLength > consecutiveTickLength { - t.Fatal("Should have increased tick length after first consecutive tick") - } - // Wait for another broadcast to occur - startTick = time.Now() select { case receivedWantReq := <-fpm.wantReqs: if len(receivedWantReq.cids) < len(cids) { @@ -434,12 +419,6 @@ func TestSessionFailingToGetFirstBlock(t *testing.T) { t.Fatal("Never rebroadcast want list") } - // Tick should take longer - secondConsecutiveTickLength := time.Since(startTick) - if consecutiveTickLength > secondConsecutiveTickLength { - t.Fatal("Should have increased tick length after first consecutive tick") - } - // Should not have tried to find peers on consecutive ticks select { case <-fpf.findMorePeersRequested: diff --git a/bitswap/client/internal/session/sessionwants.go b/bitswap/client/internal/session/sessionwants.go index 0d4ded013..4653ef872 100644 --- a/bitswap/client/internal/session/sessionwants.go +++ b/bitswap/client/internal/session/sessionwants.go @@ -56,8 +56,12 @@ func (sw *sessionWants) GetNextWants() []cid.Cid { // limit) currentLiveCount := len(sw.liveWants) toAdd := sw.broadcastLimit - currentLiveCount + liveSize := min(toAdd, sw.toFetch.Len()) + if liveSize <= 0 { + return nil + } - var live []cid.Cid + live := make([]cid.Cid, 0, liveSize) for ; toAdd > 0 
&& sw.toFetch.Len() > 0; toAdd-- { c := sw.toFetch.Pop() live = append(live, c) @@ -117,6 +121,7 @@ func (sw *sessionWants) BlocksReceived(ks []cid.Cid) ([]cid.Cid, time.Duration) cleaned = append(cleaned, c) } } + clear(sw.liveWantsOrder[len(cleaned):]) // GC cleared items sw.liveWantsOrder = cleaned } @@ -127,7 +132,7 @@ func (sw *sessionWants) BlocksReceived(ks []cid.Cid) ([]cid.Cid, time.Duration) // live want CIDs up to the broadcast limit. func (sw *sessionWants) PrepareBroadcast() []cid.Cid { now := time.Now() - live := make([]cid.Cid, 0, len(sw.liveWants)) + live := make([]cid.Cid, 0, min(len(sw.liveWants), sw.broadcastLimit)) for _, c := range sw.liveWantsOrder { if _, ok := sw.liveWants[c]; ok { // No response was received for the want, so reset the sent time @@ -148,6 +153,7 @@ func (sw *sessionWants) PrepareBroadcast() []cid.Cid { func (sw *sessionWants) CancelPending(keys []cid.Cid) { for _, k := range keys { sw.toFetch.Remove(k) + delete(sw.liveWants, k) } } diff --git a/bitswap/client/internal/session/sessionwantsender.go b/bitswap/client/internal/session/sessionwantsender.go index 1beefeb94..09a9381ea 100644 --- a/bitswap/client/internal/session/sessionwantsender.go +++ b/bitswap/client/internal/session/sessionwantsender.go @@ -161,8 +161,7 @@ func (sws *sessionWantSender) Cancel(ks []cid.Cid) { // Update is called when the session receives a message with incoming blocks // or HAVE / DONT_HAVE func (sws *sessionWantSender) Update(from peer.ID, ks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { - hasUpdate := len(ks) > 0 || len(haves) > 0 || len(dontHaves) > 0 - if !hasUpdate { + if len(ks) == 0 && len(haves) == 0 && len(dontHaves) == 0 { return } @@ -349,8 +348,7 @@ func (sws *sessionWantSender) trackWant(c cid.Cid) { } // Create the want info - wi := newWantInfo(sws.peerRspTrkr) - sws.wants[c] = wi + sws.wants[c] = newWantInfo(sws.peerRspTrkr) // For each available peer, register any information we know about // whether the peer has the block @@ -481,7 +479,7 @@ func (sws *sessionWantSender) checkForExhaustedWants(dontHaves []cid.Cid, newlyU // (because it may be the last peer who hadn't sent a DONT_HAVE for a CID) if len(newlyUnavailable) > 0 { // Collect all pending wants - wants = make([]cid.Cid, len(sws.wants)) + wants = make([]cid.Cid, 0, len(sws.wants)) for c := range sws.wants { wants = append(wants, c) } @@ -515,6 +513,7 @@ func (sws *sessionWantSender) processExhaustedWants(exhausted []cid.Cid) { type wantSets struct { wantBlocks *cid.Set wantHaves *cid.Set + sent bool } type allWants map[peer.ID]*wantSets @@ -553,9 +552,6 @@ func (sws *sessionWantSender) sendNextWants(newlyAvailable []peer.ID) { continue } - // Record that we are sending a want-block for this want to the peer - sws.setWantSentTo(c, wi.bestPeer) - // Send a want-block to the chosen peer toSend.forPeer(wi.bestPeer).wantBlocks.Add(c) @@ -569,6 +565,16 @@ func (sws *sessionWantSender) sendNextWants(newlyAvailable []peer.ID) { // Send any wants we've collected sws.sendWants(toSend) + + for c, wi := range sws.wants { + if wi.bestPeer != "" && wi.sentTo == "" { + // check if a want block was successfully sent to the best peer + if toSend.forPeer(wi.bestPeer).sent { + // Record that we are sending a want-block for this want to the peer + sws.setWantSentTo(c, wi.bestPeer) + } + } + } } // sendWants sends want-have and want-blocks to the appropriate peers @@ -586,8 +592,11 @@ func (sws *sessionWantSender) sendWants(sends allWants) { // precedence over want-haves. 
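	// snd.sent (set below) records whether the wants were actually handed
	// off to the peer's message queue; the bookkeeping that follows is
	// skipped when the send did not happen.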
	wblks := snd.wantBlocks.Keys()
	whaves := snd.wantHaves.Keys()
-		sws.pm.SendWants(sws.ctx, p, wblks, whaves)
-
+		snd.sent = sws.pm.SendWants(sws.ctx, p, wblks, whaves)
+		if !snd.sent {
+			// Do not update state if the wants were not sent.
+			continue
+		}
	// Inform the session that we've sent the wants
	sws.onSend(p, wblks, whaves)
diff --git a/bitswap/client/internal/session/sessionwantsender_test.go b/bitswap/client/internal/session/sessionwantsender_test.go
index ac094ac06..e5589dd58 100644
--- a/bitswap/client/internal/session/sessionwantsender_test.go
+++ b/bitswap/client/internal/session/sessionwantsender_test.go
@@ -82,7 +82,7 @@ func (*mockPeerManager) UnregisterSession(uint64) {}
func (*mockPeerManager) BroadcastWantHaves(context.Context, []cid.Cid) {}
func (*mockPeerManager) SendCancels(context.Context, []cid.Cid) {}
-func (pm *mockPeerManager) SendWants(ctx context.Context, p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) {
+func (pm *mockPeerManager) SendWants(ctx context.Context, p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) bool {
	pm.lk.Lock()
	defer pm.lk.Unlock()
@@ -92,6 +92,7 @@ func (pm *mockPeerManager) SendWants(ctx context.Context, p peer.ID, wantBlocks
		pm.peerSends[p] = sw
	}
	sw.add(wantBlocks, wantHaves)
+	return true
}
func (pm *mockPeerManager) waitNextWants() map[peer.ID]*sentWants {
diff --git a/bitswap/client/internal/sessionmanager/sessionmanager.go b/bitswap/client/internal/sessionmanager/sessionmanager.go
index a75a3f769..0d2b24330 100644
--- a/bitswap/client/internal/sessionmanager/sessionmanager.go
+++ b/bitswap/client/internal/sessionmanager/sessionmanager.go
@@ -57,7 +57,7 @@ type SessionManager struct {
	notif notifications.PubSub
	// Sessions
-	sessLk sync.RWMutex
+	sessLk sync.Mutex
	sessions map[uint64]Session
	// Session Index
@@ -159,13 +159,13 @@ func (sm *SessionManager) ReceiveFrom(ctx context.Context, p peer.ID, blks []cid
	// Notify each session that is interested in the blocks / HAVEs / DONT_HAVEs
	for _, id := range sm.sessionInterestManager.InterestedSessions(blks, haves, dontHaves) {
-		sm.sessLk.RLock()
+		sm.sessLk.Lock()
		if sm.sessions == nil { // check if SessionManager was shutdown
-			sm.sessLk.RUnlock()
+			sm.sessLk.Unlock()
			return
		}
		sess, ok := sm.sessions[id]
-		sm.sessLk.RUnlock()
+		sm.sessLk.Unlock()
		if ok {
			sess.ReceiveFrom(p, blks, haves, dontHaves)
diff --git a/bitswap/client/internal/sessionmanager/sessionmanager_test.go b/bitswap/client/internal/sessionmanager/sessionmanager_test.go
index e8259b1d8..bad26ad90 100644
--- a/bitswap/client/internal/sessionmanager/sessionmanager_test.go
+++ b/bitswap/client/internal/sessionmanager/sessionmanager_test.go
@@ -66,10 +66,10 @@ type fakePeerManager struct {
	cancels []cid.Cid
}
-func (*fakePeerManager) RegisterSession(peer.ID, bspm.Session) {}
-func (*fakePeerManager) UnregisterSession(uint64) {}
-func (*fakePeerManager) SendWants(context.Context, peer.ID, []cid.Cid, []cid.Cid) {}
-func (*fakePeerManager) BroadcastWantHaves(context.Context, []cid.Cid) {}
+func (*fakePeerManager) RegisterSession(peer.ID, bspm.Session) {}
+func (*fakePeerManager) UnregisterSession(uint64) {}
+func (*fakePeerManager) SendWants(context.Context, peer.ID, []cid.Cid, []cid.Cid) bool { return true }
+func (*fakePeerManager) BroadcastWantHaves(context.Context, []cid.Cid) {}
func (fpm *fakePeerManager) SendCancels(ctx context.Context, cancels []cid.Cid) {
	fpm.lk.Lock()
	defer fpm.lk.Unlock()
diff --git a/bitswap/client/wantlist/wantlist.go b/bitswap/client/wantlist/wantlist.go
index 6cb71eecc..245085af9 100644
---
a/bitswap/client/wantlist/wantlist.go +++ b/bitswap/client/wantlist/wantlist.go @@ -130,13 +130,3 @@ func (w *Wantlist) Entries() []Entry { w.cached = es return es[0:len(es):len(es)] } - -// Absorb all the entries in other into this want list -func (w *Wantlist) Absorb(other *Wantlist) { - // Invalidate the cache up-front to avoid doing any work trying to keep it up-to-date. - w.cached = nil - - for _, e := range other.Entries() { - w.Add(e.Cid, e.Priority, e.WantType) - } -} diff --git a/bitswap/client/wantlist/wantlist_test.go b/bitswap/client/wantlist/wantlist_test.go index 07d4ce415..901fe0d67 100644 --- a/bitswap/client/wantlist/wantlist_test.go +++ b/bitswap/client/wantlist/wantlist_test.go @@ -157,52 +157,6 @@ func TestAddBlockThenRemoveAny(t *testing.T) { } } -func TestAbsort(t *testing.T) { - wl := New() - wl.Add(testcids[0], 5, pb.Message_Wantlist_Block) - wl.Add(testcids[1], 4, pb.Message_Wantlist_Have) - wl.Add(testcids[2], 3, pb.Message_Wantlist_Have) - - wl2 := New() - wl2.Add(testcids[0], 2, pb.Message_Wantlist_Have) - wl2.Add(testcids[1], 1, pb.Message_Wantlist_Block) - - wl.Absorb(wl2) - - e, ok := wl.Contains(testcids[0]) - if !ok { - t.Fatal("expected to have ", testcids[0]) - } - if e.Priority != 5 { - t.Fatal("expected priority 5") - } - if e.WantType != pb.Message_Wantlist_Block { - t.Fatal("expected type ", pb.Message_Wantlist_Block) - } - - e, ok = wl.Contains(testcids[1]) - if !ok { - t.Fatal("expected to have ", testcids[1]) - } - if e.Priority != 1 { - t.Fatal("expected priority 1") - } - if e.WantType != pb.Message_Wantlist_Block { - t.Fatal("expected type ", pb.Message_Wantlist_Block) - } - - e, ok = wl.Contains(testcids[2]) - if !ok { - t.Fatal("expected to have ", testcids[2]) - } - if e.Priority != 3 { - t.Fatal("expected priority 3") - } - if e.WantType != pb.Message_Wantlist_Have { - t.Fatal("expected type ", pb.Message_Wantlist_Have) - } -} - func TestSortEntries(t *testing.T) { wl := New() diff --git a/bitswap/internal/defaults/defaults.go b/bitswap/internal/defaults/defaults.go index b30bcc87f..646b56b0d 100644 --- a/bitswap/internal/defaults/defaults.go +++ b/bitswap/internal/defaults/defaults.go @@ -6,8 +6,8 @@ import ( ) const ( - // these requests take at _least_ two minutes at the moment. - ProvideTimeout = time.Minute * 3 + // ProvSearchDelay specifies how long to wait before we start + // broadcasting outstanding wants for the first time. ProvSearchDelay = time.Second // Number of concurrent workers in decision engine that process requests to the blockstore @@ -20,11 +20,6 @@ const ( BitswapMaxOutstandingBytesPerPeer = 1 << 20 // the number of bytes we attempt to make each outgoing bitswap message BitswapEngineTargetMessageSize = 16 * 1024 - // HasBlockBufferSize is the buffer size of the channel for new blocks - // that need to be provided. They should get pulled over by the - // provideCollector even before they are actually provided. - // TODO: Does this need to be this large givent that? - HasBlockBufferSize = 256 // Maximum size of the wantlist we are willing to keep in memory. 
MaxQueuedWantlistEntiresPerPeer = 1024 diff --git a/bitswap/network/connecteventmanager.go b/bitswap/network/connecteventmanager.go index 88337fce3..bf3766089 100644 --- a/bitswap/network/connecteventmanager.go +++ b/bitswap/network/connecteventmanager.go @@ -3,6 +3,7 @@ package network import ( "sync" + "github.com/gammazero/deque" "github.com/libp2p/go-libp2p/core/peer" ) @@ -25,7 +26,7 @@ type connectEventManager struct { cond sync.Cond peers map[peer.ID]*peerState - changeQueue []peer.ID + changeQueue deque.Deque[peer.ID] stop bool done chan struct{} } @@ -75,7 +76,7 @@ func (c *connectEventManager) setState(p peer.ID, newState state) { state.newState = newState if !state.pending && state.newState != state.curState { state.pending = true - c.changeQueue = append(c.changeQueue, p) + c.changeQueue.PushBack(p) c.cond.Broadcast() } } @@ -83,7 +84,7 @@ func (c *connectEventManager) setState(p peer.ID, newState state) { // Waits for a change to be enqueued, or for the event manager to be stopped. Returns false if the // connect event manager has been stopped. func (c *connectEventManager) waitChange() bool { - for !c.stop && len(c.changeQueue) == 0 { + for !c.stop && c.changeQueue.Len() == 0 { c.cond.Wait() } return !c.stop @@ -95,9 +96,7 @@ func (c *connectEventManager) worker() { defer close(c.done) for c.waitChange() { - pid := c.changeQueue[0] - c.changeQueue[0] = peer.ID("") // free the peer ID (slicing won't do that) - c.changeQueue = c.changeQueue[1:] + pid := c.changeQueue.PopFront() state, ok := c.peers[pid] // If we've disconnected and forgotten, continue. diff --git a/bitswap/network/connecteventmanager_test.go b/bitswap/network/connecteventmanager_test.go index 3107efbcf..5d57fc104 100644 --- a/bitswap/network/connecteventmanager_test.go +++ b/bitswap/network/connecteventmanager_test.go @@ -40,7 +40,7 @@ func wait(t *testing.T, c *connectEventManager) { require.Eventually(t, func() bool { c.lk.RLock() defer c.lk.RUnlock() - return len(c.changeQueue) == 0 + return c.changeQueue.Len() == 0 }, time.Second, time.Millisecond, "connection event manager never processed events") } diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go index 6ea0fc525..6c56bab14 100644 --- a/bitswap/network/interface.go +++ b/bitswap/network/interface.go @@ -40,7 +40,7 @@ type BitSwapNetwork interface { // Stop stops the network service. Stop() - ConnectTo(context.Context, peer.ID) error + Connect(context.Context, peer.AddrInfo) error DisconnectFrom(context.Context, peer.ID) error NewMessageSender(context.Context, peer.ID, *MessageSenderOpts) (MessageSender, error) @@ -49,8 +49,6 @@ type BitSwapNetwork interface { Stats() Stats - Routing - Pinger } @@ -88,7 +86,7 @@ type Receiver interface { // network. type Routing interface { // FindProvidersAsync returns a channel of providers for the given key. - FindProvidersAsync(context.Context, cid.Cid, int) <-chan peer.ID + FindProvidersAsync(context.Context, cid.Cid, int) <-chan peer.AddrInfo // Provide provides the key to the network. 
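	// Providing announces this node to the routing system (e.g. a DHT) as
	// a provider of the key.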
Provide(context.Context, cid.Cid) error diff --git a/bitswap/network/ipfs_impl.go b/bitswap/network/ipfs_impl.go index 7acc3abcc..72f86d099 100644 --- a/bitswap/network/ipfs_impl.go +++ b/bitswap/network/ipfs_impl.go @@ -11,15 +11,12 @@ import ( bsmsg "github.com/ipfs/boxo/bitswap/message" "github.com/ipfs/boxo/bitswap/network/internal" - "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log/v2" "github.com/libp2p/go-libp2p/core/connmgr" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/core/peerstore" "github.com/libp2p/go-libp2p/core/protocol" - "github.com/libp2p/go-libp2p/core/routing" "github.com/libp2p/go-libp2p/p2p/protocol/ping" "github.com/libp2p/go-msgio" ma "github.com/multiformats/go-multiaddr" @@ -36,12 +33,11 @@ var ( ) // NewFromIpfsHost returns a BitSwapNetwork supported by underlying IPFS host. -func NewFromIpfsHost(host host.Host, r routing.ContentRouting, opts ...NetOpt) BitSwapNetwork { +func NewFromIpfsHost(host host.Host, opts ...NetOpt) BitSwapNetwork { s := processSettings(opts...) bitswapNetwork := impl{ - host: host, - routing: r, + host: host, protocolBitswapNoVers: s.ProtocolPrefix + ProtocolBitswapNoVers, protocolBitswapOneZero: s.ProtocolPrefix + ProtocolBitswapOneZero, @@ -73,7 +69,6 @@ type impl struct { stats Stats host host.Host - routing routing.ContentRouting connectEvtMgr *connectEventManager protocolBitswapNoVers protocol.ID @@ -104,7 +99,7 @@ func (s *streamMessageSender) Connect(ctx context.Context) (network.Stream, erro tctx, cancel := context.WithTimeout(ctx, s.opts.SendTimeout) defer cancel() - if err := s.bsnet.ConnectTo(tctx, s.to); err != nil { + if err := s.bsnet.Connect(ctx, peer.AddrInfo{ID: s.to}); err != nil { return nil, err } @@ -147,8 +142,10 @@ func (s *streamMessageSender) SendMsg(ctx context.Context, msg bsmsg.BitSwapMess // Perform a function with multiple attempts, and a timeout func (s *streamMessageSender) multiAttempt(ctx context.Context, fn func() error) error { - // Try to call the function repeatedly var err error + var timer *time.Timer + + // Try to call the function repeatedly for i := 0; i < s.opts.MaxRetries; i++ { if err = fn(); err == nil { // Attempt was successful @@ -179,10 +176,17 @@ func (s *streamMessageSender) multiAttempt(ctx context.Context, fn func() error) return err } + if timer == nil { + timer = time.NewTimer(s.opts.SendErrorBackoff) + defer timer.Stop() + } else { + timer.Reset(s.opts.SendErrorBackoff) + } + select { case <-ctx.Done(): return ctx.Err() - case <-time.After(s.opts.SendErrorBackoff): + case <-timer.C: // wait a short time in case disconnect notifications are still propagating log.Infof("send message to %s failed but context was not Done: %s", s.to, err) } @@ -360,40 +364,17 @@ func (bsnet *impl) Stop() { bsnet.host.Network().StopNotify((*netNotifiee)(bsnet)) } -func (bsnet *impl) ConnectTo(ctx context.Context, p peer.ID) error { - return bsnet.host.Connect(ctx, peer.AddrInfo{ID: p}) +func (bsnet *impl) Connect(ctx context.Context, p peer.AddrInfo) error { + if p.ID == bsnet.host.ID() { + return nil + } + return bsnet.host.Connect(ctx, p) } func (bsnet *impl) DisconnectFrom(ctx context.Context, p peer.ID) error { return bsnet.host.Network().ClosePeer(p) } -// FindProvidersAsync returns a channel of providers for the given key. 
-func (bsnet *impl) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.ID { - out := make(chan peer.ID, max) - go func() { - defer close(out) - providers := bsnet.routing.FindProvidersAsync(ctx, k, max) - for info := range providers { - if info.ID == bsnet.host.ID() { - continue // ignore self as provider - } - bsnet.host.Peerstore().AddAddrs(info.ID, info.Addrs, peerstore.TempAddrTTL) - select { - case <-ctx.Done(): - return - case out <- info.ID: - } - } - }() - return out -} - -// Provide provides the key to the network -func (bsnet *impl) Provide(ctx context.Context, k cid.Cid) error { - return bsnet.routing.Provide(ctx, k, true) -} - // handleNewStream receives a new stream from the network. func (bsnet *impl) handleNewStream(s network.Stream) { defer s.Close() diff --git a/bitswap/network/ipfs_impl_test.go b/bitswap/network/ipfs_impl_test.go index 91e998846..bfba5709d 100644 --- a/bitswap/network/ipfs_impl_test.go +++ b/bitswap/network/ipfs_impl_test.go @@ -13,8 +13,6 @@ import ( bsnet "github.com/ipfs/boxo/bitswap/network" "github.com/ipfs/boxo/bitswap/network/internal" tn "github.com/ipfs/boxo/bitswap/testnet" - mockrouting "github.com/ipfs/boxo/routing/mock" - ds "github.com/ipfs/go-datastore" "github.com/ipfs/go-test/random" tnet "github.com/libp2p/go-libp2p-testing/net" "github.com/libp2p/go-libp2p/core/host" @@ -170,8 +168,7 @@ func TestMessageSendAndReceive(t *testing.T) { defer cancel() mn := mocknet.New() defer mn.Close() - mr := mockrouting.NewServer() - streamNet, err := tn.StreamNet(ctx, mn, mr) + streamNet, err := tn.StreamNet(ctx, mn) if err != nil { t.Fatal("Unable to setup network") } @@ -191,7 +188,7 @@ func TestMessageSendAndReceive(t *testing.T) { if err != nil { t.Fatal(err) } - err = bsnet1.ConnectTo(ctx, p2.ID()) + err = bsnet1.Connect(ctx, peer.AddrInfo{ID: p2.ID()}) if err != nil { t.Fatal(err) } @@ -200,7 +197,7 @@ func TestMessageSendAndReceive(t *testing.T) { t.Fatal("did not connect peer") case <-r1.connectionEvent: } - err = bsnet2.ConnectTo(ctx, p1.ID()) + err = bsnet2.Connect(ctx, peer.AddrInfo{ID: p1.ID()}) if err != nil { t.Fatal(err) } @@ -275,7 +272,6 @@ func prepareNetwork(t *testing.T, ctx context.Context, p1 tnet.Identity, r1 *rec // create network mn := mocknet.New() defer mn.Close() - mr := mockrouting.NewServer() // Host 1 h1, err := mn.AddPeer(p1.PrivateKey(), p1.Address()) @@ -283,8 +279,7 @@ func prepareNetwork(t *testing.T, ctx context.Context, p1 tnet.Identity, r1 *rec t.Fatal(err) } eh1 := &ErrHost{Host: h1} - routing1 := mr.ClientWithDatastore(context.TODO(), p1, ds.NewMapDatastore()) - bsnet1 := bsnet.NewFromIpfsHost(eh1, routing1) + bsnet1 := bsnet.NewFromIpfsHost(eh1) bsnet1.Start(r1) t.Cleanup(bsnet1.Stop) if r1.listener != nil { @@ -297,8 +292,7 @@ func prepareNetwork(t *testing.T, ctx context.Context, p1 tnet.Identity, r1 *rec t.Fatal(err) } eh2 := &ErrHost{Host: h2} - routing2 := mr.ClientWithDatastore(context.TODO(), p2, ds.NewMapDatastore()) - bsnet2 := bsnet.NewFromIpfsHost(eh2, routing2) + bsnet2 := bsnet.NewFromIpfsHost(eh2) bsnet2.Start(r2) t.Cleanup(bsnet2.Stop) if r2.listener != nil { @@ -310,7 +304,7 @@ func prepareNetwork(t *testing.T, ctx context.Context, p1 tnet.Identity, r1 *rec if err != nil { t.Fatal(err) } - err = bsnet1.ConnectTo(ctx, p2.ID()) + err = bsnet1.Connect(ctx, peer.AddrInfo{ID: p2.ID()}) if err != nil { t.Fatal(err) } @@ -319,7 +313,7 @@ func prepareNetwork(t *testing.T, ctx context.Context, p1 tnet.Identity, r1 *rec t.Fatal("Expected connect event") } - err = bsnet2.ConnectTo(ctx, 
p1.ID()) + err = bsnet2.Connect(ctx, peer.AddrInfo{ID: p1.ID()}) if err != nil { t.Fatal(err) } @@ -454,8 +448,7 @@ func TestSupportsHave(t *testing.T) { ctx := context.Background() mn := mocknet.New() defer mn.Close() - mr := mockrouting.NewServer() - streamNet, err := tn.StreamNet(ctx, mn, mr) + streamNet, err := tn.StreamNet(ctx, mn) if err != nil { t.Fatalf("Unable to setup network: %s", err) } diff --git a/bitswap/options.go b/bitswap/options.go index 6a98b27db..736b58914 100644 --- a/bitswap/options.go +++ b/bitswap/options.go @@ -43,10 +43,6 @@ func TaskWorkerCount(count int) Option { return Option{server.TaskWorkerCount(count)} } -func ProvideEnabled(enabled bool) Option { - return Option{server.ProvideEnabled(enabled)} -} - func SetSendDontHaves(send bool) Option { return Option{server.SetSendDontHaves(send)} } @@ -106,3 +102,11 @@ func WithTracer(tap tracer.Tracer) Option { }), } } + +func WithClientOption(opt client.Option) Option { + return Option{opt} +} + +func WithServerOption(opt server.Option) Option { + return Option{opt} +} diff --git a/bitswap/server/internal/decision/engine.go b/bitswap/server/internal/decision/engine.go index e67c15fda..a856ab44b 100644 --- a/bitswap/server/internal/decision/engine.go +++ b/bitswap/server/internal/decision/engine.go @@ -25,7 +25,6 @@ import ( "github.com/ipfs/go-peertaskqueue" "github.com/ipfs/go-peertaskqueue/peertask" "github.com/ipfs/go-peertaskqueue/peertracker" - process "github.com/jbenet/goprocess" "github.com/libp2p/go-libp2p/core/peer" mh "github.com/multiformats/go-multihash" ) @@ -195,6 +194,9 @@ type Engine struct { taskWorkerLock sync.Mutex taskWorkerCount int + waitWorkers sync.WaitGroup + cancel context.CancelFunc + closeOnce sync.Once targetMessageSize int @@ -382,6 +384,8 @@ func NewEngine( self peer.ID, opts ...Option, ) *Engine { + ctx, cancel := context.WithCancel(ctx) + e := &Engine{ scoreLedger: NewDefaultScoreLedger(), bstoreWorkerCount: defaults.BitswapEngineBlockstoreWorkerCount, @@ -401,6 +405,7 @@ func NewEngine( tagUseful: fmt.Sprintf(tagFormat, "useful", uuid.New().String()), maxQueuedWantlistEntriesPerPeer: defaults.MaxQueuedWantlistEntiresPerPeer, maxCidSize: defaults.MaximumAllowedCid, + cancel: cancel, } for _, opt := range opts { @@ -437,6 +442,8 @@ func NewEngine( log.Infow("Replace WantHave with WantBlock is enabled", "maxSize", e.wantHaveReplaceSize) } + e.startWorkers(ctx) + return e } @@ -462,7 +469,7 @@ func (e *Engine) SetSendDontHaves(send bool) { // Starts the score ledger. Before start the function checks and, // if it is unset, initializes the scoreLedger with the default // implementation. -func (e *Engine) startScoreLedger(px process.Process) { +func (e *Engine) startScoreLedger() { e.scoreLedger.Start(func(p peer.ID, score int) { if score == 0 { e.peerTagger.UntagPeer(p, e.tagUseful) @@ -470,35 +477,34 @@ func (e *Engine) startScoreLedger(px process.Process) { e.peerTagger.TagPeer(p, e.tagUseful, score) } }) - px.Go(func(ppx process.Process) { - <-ppx.Closing() - e.scoreLedger.Stop() - }) } -func (e *Engine) startBlockstoreManager(px process.Process) { +// startWorkers starts workers to handle requests from other nodes for the data +// on this node. 
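+// Workers run until the engine's context is cancelled by Close. Typical
+// lifecycle (illustrative):
+//
+//	e := NewEngine(ctx, bs, peerTagger, self)
+//	defer e.Close()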
+func (e *Engine) startWorkers(ctx context.Context) { e.bsm.start() - px.Go(func(ppx process.Process) { - <-ppx.Closing() - e.bsm.stop() - }) -} - -// Start up workers to handle requests from other nodes for the data on this node -func (e *Engine) StartWorkers(ctx context.Context, px process.Process) { - e.startBlockstoreManager(px) - e.startScoreLedger(px) + e.startScoreLedger() e.taskWorkerLock.Lock() defer e.taskWorkerLock.Unlock() + e.waitWorkers.Add(e.taskWorkerCount) for i := 0; i < e.taskWorkerCount; i++ { - px.Go(func(_ process.Process) { - e.taskWorker(ctx) - }) + go e.taskWorker(ctx) } } +// Close shuts down the decision engine and returns after all workers have +// finished. Safe to call multiple times/concurrently. +func (e *Engine) Close() { + e.closeOnce.Do(func() { + e.cancel() + e.bsm.stop() + e.scoreLedger.Stop() + }) + e.waitWorkers.Wait() +} + func (e *Engine) onPeerAdded(p peer.ID) { e.peerTagger.TagPeer(p, e.tagQueued, queuedTagWeight) } @@ -524,6 +530,7 @@ func (e *Engine) LedgerForPeer(p peer.ID) *Receipt { // and adds them to an envelope that is passed off to the bitswap workers, // which send the message to the network. func (e *Engine) taskWorker(ctx context.Context) { + defer e.waitWorkers.Done() defer e.taskWorkerExit() for { oneTimeUse := make(chan *Envelope, 1) // buffer to prevent blocking diff --git a/bitswap/server/internal/decision/engine_test.go b/bitswap/server/internal/decision/engine_test.go index 5cc1375c7..cab5b5691 100644 --- a/bitswap/server/internal/decision/engine_test.go +++ b/bitswap/server/internal/decision/engine_test.go @@ -23,7 +23,6 @@ import ( ds "github.com/ipfs/go-datastore" dssync "github.com/ipfs/go-datastore/sync" "github.com/ipfs/go-test/random" - process "github.com/jbenet/goprocess" peer "github.com/libp2p/go-libp2p/core/peer" libp2ptest "github.com/libp2p/go-libp2p/core/test" mh "github.com/multiformats/go-multihash" @@ -95,15 +94,14 @@ type engineSet struct { Blockstore blockstore.Blockstore } -func newTestEngine(ctx context.Context, idStr string, opts ...Option) engineSet { - return newTestEngineWithSampling(ctx, idStr, shortTerm, nil, clock.New(), opts...) +func newTestEngine(idStr string, opts ...Option) engineSet { + return newTestEngineWithSampling(idStr, shortTerm, nil, clock.New(), opts...) } -func newTestEngineWithSampling(ctx context.Context, idStr string, peerSampleInterval time.Duration, sampleCh chan struct{}, clock clock.Clock, opts ...Option) engineSet { +func newTestEngineWithSampling(idStr string, peerSampleInterval time.Duration, sampleCh chan struct{}, clock clock.Clock, opts ...Option) engineSet { fpt := &fakePeerTagger{} bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) - e := newEngineForTesting(ctx, bs, fpt, "localhost", 0, append(opts[:len(opts):len(opts)], WithScoreLedger(NewTestScoreLedger(peerSampleInterval, sampleCh, clock)), WithBlockstoreWorkerCount(4))...) - e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) + e := newEngineForTesting(bs, fpt, "localhost", 0, append(opts[:len(opts):len(opts)], WithScoreLedger(NewTestScoreLedger(peerSampleInterval, sampleCh, clock)), WithBlockstoreWorkerCount(4))...) 
return engineSet{ Peer: peer.ID(idStr), PeerTagger: fpt, @@ -113,20 +111,19 @@ func newTestEngineWithSampling(ctx context.Context, idStr string, peerSampleInte } func TestConsistentAccounting(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - sender := newTestEngine(ctx, "Ernie") - receiver := newTestEngine(ctx, "Bert") + sender := newTestEngine("Ernie") + defer sender.Engine.Close() + receiver := newTestEngine("Bert") + defer receiver.Engine.Close() // Send messages from Ernie to Bert for i := 0; i < 1000; i++ { - m := message.New(false) content := []string{"this", "is", "message", "i"} m.AddBlock(blocks.NewBlock([]byte(strings.Join(content, " ")))) sender.Engine.MessageSent(receiver.Peer, m) - receiver.Engine.MessageReceived(ctx, sender.Peer, m) + receiver.Engine.MessageReceived(context.Background(), sender.Peer, m) receiver.Engine.ReceivedBlocks(sender.Peer, m.Blocks()) } @@ -148,17 +145,17 @@ func TestConsistentAccounting(t *testing.T) { } func TestPeerIsAddedToPeersWhenMessageSent(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - sanfrancisco := newTestEngine(ctx, "sf") - seattle := newTestEngine(ctx, "sea") + sanfrancisco := newTestEngine("sf") + defer sanfrancisco.Engine.Close() + seattle := newTestEngine("sea") + defer seattle.Engine.Close() m := message.New(true) // We need to request something for it to add us as partner. m.AddEntry(blocks.NewBlock([]byte("Hæ")).Cid(), 0, pb.Message_Wantlist_Block, true) - seattle.Engine.MessageReceived(ctx, sanfrancisco.Peer, m) + seattle.Engine.MessageReceived(context.Background(), sanfrancisco.Peer, m) if seattle.Peer == sanfrancisco.Peer { t.Fatal("Sanity Check: Peers have same Key!") @@ -184,7 +181,6 @@ func peerIsPartner(p peer.ID, e *Engine) bool { } func newEngineForTesting( - ctx context.Context, bs blockstore.Blockstore, peerTagger PeerTagger, self peer.ID, @@ -192,14 +188,13 @@ func newEngineForTesting( opts ...Option, ) *Engine { opts = append(opts, WithWantHaveReplaceSize(wantHaveReplaceSize)) - return NewEngine(ctx, bs, peerTagger, self, opts...) + return NewEngine(context.Background(), bs, peerTagger, self, opts...) 
} func TestOutboxClosedWhenEngineClosed(t *testing.T) { t.SkipNow() // TODO implement *Engine.Close - ctx := context.Background() - e := newEngineForTesting(ctx, blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())), &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) - e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) + e := newEngineForTesting(blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())), &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) + defer e.Close() var wg sync.WaitGroup wg.Add(1) go func() { @@ -526,9 +521,8 @@ func TestPartnerWantHaveWantBlockNonActive(t *testing.T) { testCases = onlyTestCases } - ctx := context.Background() - e := newEngineForTesting(ctx, bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) - e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) + e := newEngineForTesting(bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) + defer e.Close() for i, testCase := range testCases { t.Logf("Test case %d:", i) for _, wl := range testCase.wls { @@ -683,9 +677,8 @@ func TestPartnerWantHaveWantBlockActive(t *testing.T) { testCases = onlyTestCases } - ctx := context.Background() - e := newEngineForTesting(ctx, bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) - e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) + e := newEngineForTesting(bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) + defer e.Close() var next envChan for i, testCase := range testCases { @@ -866,11 +859,10 @@ func TestPartnerWantsThenCancels(t *testing.T) { } } - ctx := context.Background() for i := 0; i < numRounds; i++ { expected := make([][]string, 0, len(testcases)) - e := newEngineForTesting(ctx, bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) - e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) + e := newEngineForTesting(bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) + defer e.Close() for _, testcase := range testcases { set := testcase[0] cancels := testcase[1] @@ -894,9 +886,8 @@ func TestSendReceivedBlocksToPeersThatWantThem(t *testing.T) { partner := libp2ptest.RandPeerIDFatal(t) otherPeer := libp2ptest.RandPeerIDFatal(t) - ctx := context.Background() - e := newEngineForTesting(ctx, bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) - e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) + e := newEngineForTesting(bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) + defer e.Close() blks := random.BlocksOfSize(4, 8*1024) msg := message.New(false) @@ -940,9 +931,8 @@ func TestSendDontHave(t *testing.T) { partner := libp2ptest.RandPeerIDFatal(t) otherPeer := libp2ptest.RandPeerIDFatal(t) - ctx := context.Background() - e := newEngineForTesting(ctx, 
bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) - e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) + e := newEngineForTesting(bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) + defer e.Close() blks := random.BlocksOfSize(4, 8*1024) msg := message.New(false) @@ -1006,9 +996,8 @@ func TestWantlistForPeer(t *testing.T) { partner := libp2ptest.RandPeerIDFatal(t) otherPeer := libp2ptest.RandPeerIDFatal(t) - ctx := context.Background() - e := newEngineForTesting(ctx, bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) - e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) + e := newEngineForTesting(bs, &fakePeerTagger{}, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4)) + defer e.Close() blks := random.BlocksOfSize(4, 8*1024) msg := message.New(false) @@ -1039,9 +1028,6 @@ func TestWantlistForPeer(t *testing.T) { } func TestTaskComparator(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) - defer cancel() - keys := []string{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j"} cids := make(map[cid.Cid]int) blks := make([]blocks.Block, 0, len(keys)) @@ -1054,19 +1040,22 @@ func TestTaskComparator(t *testing.T) { fpt := &fakePeerTagger{} sl := NewTestScoreLedger(shortTerm, nil, clock.New()) bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + if err := bs.PutMany(ctx, blks); err != nil { t.Fatal(err) } // use a single task worker so that the order of outgoing messages is deterministic - e := newEngineForTesting(ctx, bs, fpt, "localhost", 0, WithScoreLedger(sl), WithBlockstoreWorkerCount(4), WithTaskWorkerCount(1), + e := newEngineForTesting(bs, fpt, "localhost", 0, WithScoreLedger(sl), WithBlockstoreWorkerCount(4), WithTaskWorkerCount(1), // if this Option is omitted, the test fails WithTaskComparator(func(ta, tb *TaskInfo) bool { // prioritize based on lexicographic ordering of block content return cids[ta.Cid] < cids[tb.Cid] }), ) - e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) + defer e.Close() // rely on randomness of Go map's iteration order to add Want entries in random order peerIDs := make([]peer.ID, len(keys)) @@ -1093,9 +1082,6 @@ func TestTaskComparator(t *testing.T) { } func TestPeerBlockFilter(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) - defer cancel() - // Generate a few keys keys := []string{"a", "b", "c", "d"} blks := make([]blocks.Block, 0, len(keys)) @@ -1114,11 +1100,14 @@ func TestPeerBlockFilter(t *testing.T) { fpt := &fakePeerTagger{} sl := NewTestScoreLedger(shortTerm, nil, clock.New()) bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + if err := bs.PutMany(ctx, blks); err != nil { t.Fatal(err) } - e := newEngineForTesting(ctx, bs, fpt, "localhost", 0, WithScoreLedger(sl), WithBlockstoreWorkerCount(4), + e := newEngineForTesting(bs, fpt, "localhost", 0, WithScoreLedger(sl), WithBlockstoreWorkerCount(4), WithPeerBlockRequestFilter(func(p peer.ID, c cid.Cid) bool { // peer 0 has access to everything 
if p == peerIDs[0] { @@ -1132,7 +1121,7 @@ func TestPeerBlockFilter(t *testing.T) { return blks[3].Cid().Equals(c) }), ) - e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) + defer e.Close() // Setup the test type testCaseEntry struct { @@ -1252,9 +1241,6 @@ func TestPeerBlockFilter(t *testing.T) { } func TestPeerBlockFilterMutability(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) - defer cancel() - // Generate a few keys keys := []string{"a", "b", "c", "d"} blks := make([]blocks.Block, 0, len(keys)) @@ -1269,18 +1255,21 @@ func TestPeerBlockFilterMutability(t *testing.T) { fpt := &fakePeerTagger{} sl := NewTestScoreLedger(shortTerm, nil, clock.New()) bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + if err := bs.PutMany(ctx, blks); err != nil { t.Fatal(err) } filterAllowList := make(map[cid.Cid]bool) - e := newEngineForTesting(ctx, bs, fpt, "localhost", 0, WithScoreLedger(sl), WithBlockstoreWorkerCount(4), + e := newEngineForTesting(bs, fpt, "localhost", 0, WithScoreLedger(sl), WithBlockstoreWorkerCount(4), WithPeerBlockRequestFilter(func(p peer.ID, c cid.Cid) bool { return filterAllowList[c] }), ) - e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) + defer e.Close() // Setup the test type testCaseEntry struct { @@ -1421,10 +1410,10 @@ func TestPeerBlockFilterMutability(t *testing.T) { } func TestTaggingPeers(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) - defer cancel() - sanfrancisco := newTestEngine(ctx, "sf") - seattle := newTestEngine(ctx, "sea") + sanfrancisco := newTestEngine("sf") + defer sanfrancisco.Engine.Close() + seattle := newTestEngine("sea") + defer seattle.Engine.Close() keys := []string{"a", "b", "c", "d", "e"} for _, letter := range keys { @@ -1451,12 +1440,10 @@ func TestTaggingPeers(t *testing.T) { func TestTaggingUseful(t *testing.T) { const peerSampleIntervalHalf = 10 * time.Millisecond - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - sampleCh := make(chan struct{}) mockClock := clock.NewMock() - me := newTestEngineWithSampling(ctx, "engine", peerSampleIntervalHalf*2, sampleCh, mockClock) + me := newTestEngineWithSampling("engine", peerSampleIntervalHalf*2, sampleCh, mockClock) + defer me.Engine.Close() mockClock.Add(1 * time.Millisecond) friend := peer.ID("friend") @@ -1544,9 +1531,6 @@ func partnerCancels(e *Engine, keys []string, partner peer.ID) { type envChan <-chan *Envelope func getNextEnvelope(e *Engine, next envChan, t time.Duration) (envChan, *Envelope) { - ctx, cancel := context.WithTimeout(context.Background(), t) - defer cancel() - if next == nil { next = <-e.Outbox() // returns immediately } @@ -1558,7 +1542,7 @@ func getNextEnvelope(e *Engine, next envChan, t time.Duration) (envChan, *Envelo return nil, nil } return nil, env - case <-ctx.Done(): + case <-time.After(t): // log.Warnf("got timeout") } return next, nil @@ -1606,12 +1590,11 @@ func stringsComplement(set, subset []string) []string { } func TestWantlistDoesNotGrowPastLimit(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - const limit = 32 - warsaw := newTestEngine(ctx, "warsaw", WithMaxQueuedWantlistEntriesPerPeer(limit)) - riga := newTestEngine(ctx, "riga") + warsaw := newTestEngine("warsaw", WithMaxQueuedWantlistEntriesPerPeer(limit)) + defer warsaw.Engine.Close() + 
riga := newTestEngine("riga") + defer riga.Engine.Close() // Send in two messages to test reslicing. for i := 2; i != 0; i-- { @@ -1619,7 +1602,7 @@ func TestWantlistDoesNotGrowPastLimit(t *testing.T) { for j := limit * 3 / 4; j != 0; j-- { m.AddEntry(blocks.NewBlock([]byte(fmt.Sprint(i, j))).Cid(), 0, pb.Message_Wantlist_Block, true) } - warsaw.Engine.MessageReceived(ctx, riga.Peer, m) + warsaw.Engine.MessageReceived(context.Background(), riga.Peer, m) } if warsaw.Peer == riga.Peer { @@ -1633,19 +1616,19 @@ func TestWantlistDoesNotGrowPastLimit(t *testing.T) { } func TestWantlistGrowsToLimit(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - const limit = 32 - warsaw := newTestEngine(ctx, "warsaw", WithMaxQueuedWantlistEntriesPerPeer(limit)) - riga := newTestEngine(ctx, "riga") + warsaw := newTestEngine("warsaw", WithMaxQueuedWantlistEntriesPerPeer(limit)) + defer warsaw.Engine.Close() + riga := newTestEngine("riga") + defer riga.Engine.Close() // Send in two messages to test reslicing. m := message.New(false) for j := limit; j != 0; j-- { m.AddEntry(blocks.NewBlock([]byte(strconv.Itoa(j))).Cid(), 0, pb.Message_Wantlist_Block, true) } - warsaw.Engine.MessageReceived(ctx, riga.Peer, m) + + warsaw.Engine.MessageReceived(context.Background(), riga.Peer, m) if warsaw.Peer == riga.Peer { t.Fatal("Sanity Check: Peers have same Key!") @@ -1658,12 +1641,11 @@ func TestWantlistGrowsToLimit(t *testing.T) { } func TestIgnoresCidsAboveLimit(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - const cidLimit = 64 - warsaw := newTestEngine(ctx, "warsaw", WithMaxCidSize(cidLimit)) - riga := newTestEngine(ctx, "riga") + warsaw := newTestEngine("warsaw", WithMaxCidSize(cidLimit)) + defer warsaw.Engine.Close() + riga := newTestEngine("riga") + defer riga.Engine.Close() // Send in two messages to test reslicing. 
m := message.New(true) @@ -1678,7 +1660,7 @@ func TestIgnoresCidsAboveLimit(t *testing.T) { rand.Read(hash[startOfDigest:]) m.AddEntry(cid.NewCidV1(cid.Raw, hash), 0, pb.Message_Wantlist_Block, true) - warsaw.Engine.MessageReceived(ctx, riga.Peer, m) + warsaw.Engine.MessageReceived(context.Background(), riga.Peer, m) if warsaw.Peer == riga.Peer { t.Fatal("Sanity Check: Peers have same Key!") @@ -1691,11 +1673,10 @@ func TestIgnoresCidsAboveLimit(t *testing.T) { } func TestKillConnectionForInlineCid(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - warsaw := newTestEngine(ctx, "warsaw") - riga := newTestEngine(ctx, "riga") + warsaw := newTestEngine("warsaw") + defer warsaw.Engine.Close() + riga := newTestEngine("riga") + defer riga.Engine.Close() if warsaw.Peer == riga.Peer { t.Fatal("Sanity Check: Peers have same Key!") @@ -1715,7 +1696,7 @@ func TestKillConnectionForInlineCid(t *testing.T) { rand.Read(hash[startOfDigest:]) m.AddEntry(cid.NewCidV1(cid.Raw, hash), 0, pb.Message_Wantlist_Block, true) - if !warsaw.Engine.MessageReceived(ctx, riga.Peer, m) { + if !warsaw.Engine.MessageReceived(context.Background(), riga.Peer, m) { t.Fatal("connection was not killed when receiving inline in cancel") } @@ -1724,15 +1705,12 @@ func TestKillConnectionForInlineCid(t *testing.T) { m.AddEntry(blocks.NewBlock([]byte("Hæ")).Cid(), 0, pb.Message_Wantlist_Block, true) m.Cancel(cid.NewCidV1(cid.Raw, hash)) - if !warsaw.Engine.MessageReceived(ctx, riga.Peer, m) { + if !warsaw.Engine.MessageReceived(context.Background(), riga.Peer, m) { t.Fatal("connection was not killed when receiving inline in cancel") } } func TestWantlistBlocked(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - const limit = 32 bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) @@ -1752,15 +1730,17 @@ func TestWantlistBlocked(t *testing.T) { } fpt := &fakePeerTagger{} - e := newEngineForTesting(ctx, bs, fpt, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4), WithMaxQueuedWantlistEntriesPerPeer(limit)) - e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) + e := newEngineForTesting(bs, fpt, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4), WithMaxQueuedWantlistEntriesPerPeer(limit)) + defer e.Close() + warsaw := engineSet{ Peer: peer.ID("warsaw"), PeerTagger: fpt, Blockstore: bs, Engine: e, } - riga := newTestEngine(ctx, "riga") + riga := newTestEngine("riga") + defer riga.Engine.Close() if warsaw.Peer == riga.Peer { t.Fatal("Sanity Check: Peers have same Key!") } @@ -1773,7 +1753,7 @@ func TestWantlistBlocked(t *testing.T) { m.AddEntry(c, 1, pb.Message_Wantlist_Block, true) dontHaveCids[i] = c } - warsaw.Engine.MessageReceived(ctx, riga.Peer, m) + warsaw.Engine.MessageReceived(context.Background(), riga.Peer, m) wl := warsaw.Engine.WantlistForPeer(riga.Peer) // Check that all the dontHave wants are on the wantlist. for _, c := range dontHaveCids { @@ -1787,7 +1767,7 @@ func TestWantlistBlocked(t *testing.T) { for _, c := range haveCids { m.AddEntry(c, 1, pb.Message_Wantlist_Block, true) } - warsaw.Engine.MessageReceived(ctx, riga.Peer, m) + warsaw.Engine.MessageReceived(context.Background(), riga.Peer, m) wl = warsaw.Engine.WantlistForPeer(riga.Peer) // Check that all the dontHave wants are on the wantlist. 
for _, c := range haveCids { @@ -1804,7 +1784,7 @@ func TestWantlistBlocked(t *testing.T) { m.AddEntry(c, 1, pb.Message_Wantlist_Block, true) dontHaveCids[i] = c } - warsaw.Engine.MessageReceived(ctx, riga.Peer, m) + warsaw.Engine.MessageReceived(context.Background(), riga.Peer, m) // Check that all the new dontHave wants are not on the wantlist. for _, c := range dontHaveCids { if findCid(c, wl) { @@ -1815,9 +1795,6 @@ func TestWantlistBlocked(t *testing.T) { } func TestWantlistOverflow(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - const limit = 32 bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) @@ -1838,20 +1815,21 @@ func TestWantlistOverflow(t *testing.T) { } fpt := &fakePeerTagger{} - e := newEngineForTesting(ctx, bs, fpt, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4), WithMaxQueuedWantlistEntriesPerPeer(limit)) - e.StartWorkers(ctx, process.WithTeardown(func() error { return nil })) + e := newEngineForTesting(bs, fpt, "localhost", 0, WithScoreLedger(NewTestScoreLedger(shortTerm, nil, clock.New())), WithBlockstoreWorkerCount(4), WithMaxQueuedWantlistEntriesPerPeer(limit)) + defer e.Close() warsaw := engineSet{ Peer: peer.ID("warsaw"), PeerTagger: fpt, Blockstore: bs, Engine: e, } - riga := newTestEngine(ctx, "riga") + riga := newTestEngine("riga") + defer riga.Engine.Close() if warsaw.Peer == riga.Peer { t.Fatal("Sanity Check: Peers have same Key!") } - warsaw.Engine.MessageReceived(ctx, riga.Peer, m) + warsaw.Engine.MessageReceived(context.Background(), riga.Peer, m) // Check that the wantlist is at the size limit. wl := warsaw.Engine.WantlistForPeer(riga.Peer) if len(wl) != limit { @@ -1867,7 +1845,7 @@ func TestWantlistOverflow(t *testing.T) { m.AddEntry(c, 0, pb.Message_Wantlist_Block, true) lowPrioCids[i] = c } - warsaw.Engine.MessageReceived(ctx, riga.Peer, m) + warsaw.Engine.MessageReceived(context.Background(), riga.Peer, m) wl = warsaw.Engine.WantlistForPeer(riga.Peer) if len(wl) != limit { t.Fatal("wantlist size", len(wl), "does not match limit", limit) @@ -1893,7 +1871,7 @@ func TestWantlistOverflow(t *testing.T) { m.AddEntry(c, 10, pb.Message_Wantlist_Block, true) highPrioCids[i] = c } - warsaw.Engine.MessageReceived(ctx, riga.Peer, m) + warsaw.Engine.MessageReceived(context.Background(), riga.Peer, m) wl = warsaw.Engine.WantlistForPeer(riga.Peer) if len(wl) != limit { t.Fatal("wantlist size", len(wl), "does not match limit", limit) @@ -1918,7 +1896,7 @@ func TestWantlistOverflow(t *testing.T) { m.AddEntry(c, 0, pb.Message_Wantlist_Block, true) blockCids[i] = c } - warsaw.Engine.MessageReceived(ctx, riga.Peer, m) + warsaw.Engine.MessageReceived(context.Background(), riga.Peer, m) wl = warsaw.Engine.WantlistForPeer(riga.Peer) if len(wl) != limit { t.Fatal("wantlist size", len(wl), "does not match limit", limit) @@ -1942,7 +1920,7 @@ func TestWantlistOverflow(t *testing.T) { for _, c := range origCids { m.AddEntry(c, 0, pb.Message_Wantlist_Block, true) } - warsaw.Engine.MessageReceived(ctx, riga.Peer, m) + warsaw.Engine.MessageReceived(context.Background(), riga.Peer, m) wl = warsaw.Engine.WantlistForPeer(riga.Peer) for _, c := range origCids { if !findCid(c, wl) { diff --git a/bitswap/server/server.go b/bitswap/server/server.go index f3c5786a9..de27aecf2 100644 --- a/bitswap/server/server.go +++ b/bitswap/server/server.go @@ -20,21 +20,15 @@ import ( "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log/v2" 
"github.com/ipfs/go-metrics-interface" - process "github.com/jbenet/goprocess" - procctx "github.com/jbenet/goprocess/context" "github.com/libp2p/go-libp2p/core/peer" "go.uber.org/zap" ) -var provideKeysBufferSize = 2048 - var ( log = logging.Logger("bitswap/server") sflog = log.Desugar() ) -const provideWorkerMax = 6 - type Option func(*Server) type Server struct { @@ -57,46 +51,28 @@ type Server struct { // the total number of simultaneous threads sending outgoing messages taskWorkerCount int - process process.Process - - // newBlocks is a channel for newly added blocks to be provided to the - // network. blocks pushed down this channel get buffered and fed to the - // provideKeys channel later on to avoid too much network activity - newBlocks chan cid.Cid - // provideKeys directly feeds provide workers - provideKeys chan cid.Cid + // Cancel stops the server + cancel context.CancelFunc + closing chan struct{} + closeOnce sync.Once + // waitWorkers waits for all worker goroutines to exit. + waitWorkers sync.WaitGroup // Extra options to pass to the decision manager engineOptions []decision.Option - - // the size of channel buffer to use - hasBlockBufferSize int - // whether or not to make provide announcements - provideEnabled bool } func New(ctx context.Context, network bsnet.BitSwapNetwork, bstore blockstore.Blockstore, options ...Option) *Server { ctx, cancel := context.WithCancel(ctx) - px := process.WithTeardown(func() error { - return nil - }) - go func() { - <-px.Closing() // process closes first - cancel() - }() - s := &Server{ - sentHistogram: bmetrics.SentHist(ctx), - sendTimeHistogram: bmetrics.SendTimeHist(ctx), - taskWorkerCount: defaults.BitswapTaskWorkerCount, - network: network, - process: px, - provideEnabled: true, - hasBlockBufferSize: defaults.HasBlockBufferSize, - provideKeys: make(chan cid.Cid, provideKeysBufferSize), + sentHistogram: bmetrics.SentHist(ctx), + sendTimeHistogram: bmetrics.SendTimeHist(ctx), + taskWorkerCount: defaults.BitswapTaskWorkerCount, + network: network, + cancel: cancel, + closing: make(chan struct{}), } - s.newBlocks = make(chan cid.Cid, s.hasBlockBufferSize) for _, o := range options { o(s) @@ -111,7 +87,7 @@ func New(ctx context.Context, network bsnet.BitSwapNetwork, bstore blockstore.Bl ) s.engineOptions = nil - s.startWorkers(ctx, px) + s.startWorkers(ctx) return s } @@ -131,13 +107,6 @@ func WithTracer(tap tracer.Tracer) Option { } } -// ProvideEnabled is an option for enabling/disabling provide announcements -func ProvideEnabled(enabled bool) Option { - return func(bs *Server) { - bs.provideEnabled = enabled - } -} - func WithPeerBlockRequestFilter(pbrf decision.PeerBlockRequestFilter) Option { o := decision.WithPeerBlockRequestFilter(pbrf) return func(bs *Server) { @@ -241,16 +210,6 @@ func MaxCidSize(n uint) Option { } } -// HasBlockBufferSize configure how big the new blocks buffer should be. -func HasBlockBufferSize(count int) Option { - if count < 0 { - panic("cannot have negative buffer size") - } - return func(bs *Server) { - bs.hasBlockBufferSize = count - } -} - // WithWantHaveReplaceSize sets the maximum size of a block in bytes up to // which the bitswap server will replace a WantHave with a WantBlock response. 
// @@ -293,33 +252,20 @@ func (bs *Server) WantlistForPeer(p peer.ID) []cid.Cid { return out } -func (bs *Server) startWorkers(ctx context.Context, px process.Process) { - bs.engine.StartWorkers(ctx, px) - +func (bs *Server) startWorkers(ctx context.Context) { // Start up workers to handle requests from other nodes for the data on this node + bs.waitWorkers.Add(bs.taskWorkerCount) for i := 0; i < bs.taskWorkerCount; i++ { i := i - px.Go(func(px process.Process) { - bs.taskWorker(ctx, i) - }) - } - - if bs.provideEnabled { - // Start up a worker to manage sending out provides messages - px.Go(func(px process.Process) { - bs.provideCollector(ctx) - }) - - // Spawn up multiple workers to handle incoming blocks - // consider increasing number if providing blocks bottlenecks - // file transfers - px.Go(bs.provideWorker) + go bs.taskWorker(ctx, i) } } func (bs *Server) taskWorker(ctx context.Context, id int) { - defer log.Debug("bitswap task worker shutting down...") + defer bs.waitWorkers.Done() + log := log.With("ID", id) + defer log.Debug("bitswap task worker shutting down...") for { log.Debug("Bitswap.TaskWorker.Loop") select { @@ -341,8 +287,7 @@ func (bs *Server) taskWorker(ctx context.Context, id int) { } bs.sendBlocks(ctx, envelope) - dur := time.Since(start) - bs.sendTimeHistogram.Observe(dur.Seconds()) + bs.sendTimeHistogram.Observe(time.Since(start).Seconds()) case <-ctx.Done(): return @@ -422,10 +367,9 @@ func (bs *Server) sendBlocks(ctx context.Context, env *decision.Envelope) { } type Stat struct { - Peers []string - ProvideBufLen int - BlocksSent uint64 - DataSent uint64 + Peers []string + BlocksSent uint64 + DataSent uint64 } // Stat returns aggregated statistics about bitswap operations @@ -433,7 +377,6 @@ func (bs *Server) Stat() (Stat, error) { bs.counterLk.Lock() s := bs.counters bs.counterLk.Unlock() - s.ProvideBufLen = len(bs.newBlocks) peers := bs.engine.Peers() peersStr := make([]string, len(peers)) @@ -463,7 +406,7 @@ func (bs *Server) NotifyNewBlock(ctx context.Context, blk blocks.Block) error { // that those blocks are available in the blockstore before calling this function. func (bs *Server) NotifyNewBlocks(ctx context.Context, blks ...blocks.Block) error { select { - case <-bs.process.Closing(): + case <-bs.closing: return errors.New("bitswap is closed") default: } @@ -471,107 +414,9 @@ func (bs *Server) NotifyNewBlocks(ctx context.Context, blks ...blocks.Block) err // Send wanted blocks to decision engine bs.engine.NotifyNewBlocks(blks) - // If the reprovider is enabled, send block to reprovider - if bs.provideEnabled { - for _, blk := range blks { - select { - case bs.newBlocks <- blk.Cid(): - // send block off to be reprovided - case <-bs.process.Closing(): - return bs.process.Close() - } - } - } - return nil } -func (bs *Server) provideCollector(ctx context.Context) { - defer close(bs.provideKeys) - var toProvide []cid.Cid - var nextKey cid.Cid - var keysOut chan cid.Cid - - for { - select { - case blkey, ok := <-bs.newBlocks: - if !ok { - log.Debug("newBlocks channel closed") - return - } - - if keysOut == nil { - nextKey = blkey - keysOut = bs.provideKeys - } else { - toProvide = append(toProvide, blkey) - } - case keysOut <- nextKey: - if len(toProvide) > 0 { - nextKey = toProvide[0] - toProvide = toProvide[1:] - } else { - keysOut = nil - } - case <-ctx.Done(): - return - } - } -} - -func (bs *Server) provideWorker(px process.Process) { - // FIXME: OnClosingContext returns a _custom_ context type. 
-	// Unfortunately, deriving a new cancelable context from this custom
-	// type fires off a goroutine. To work around this, we create a single
-	// cancelable context up-front and derive all sub-contexts from that.
-	//
-	// See: https://github.com/ipfs/go-ipfs/issues/5810
-	ctx := procctx.OnClosingContext(px)
-	ctx, cancel := context.WithCancel(ctx)
-	defer cancel()
-
-	limit := make(chan struct{}, provideWorkerMax)
-
-	limitedGoProvide := func(k cid.Cid, wid int) {
-		defer func() {
-			// replace token when done
-			<-limit
-		}()
-
-		log.Debugw("Bitswap.ProvideWorker.Start", "ID", wid, "cid", k)
-		defer log.Debugw("Bitswap.ProvideWorker.End", "ID", wid, "cid", k)
-
-		ctx, cancel := context.WithTimeout(ctx, defaults.ProvideTimeout) // timeout ctx
-		defer cancel()
-
-		if err := bs.network.Provide(ctx, k); err != nil {
-			log.Warn(err)
-		}
-	}
-
-	// worker spawner, reads from bs.provideKeys until it closes, spawning a
-	// _ratelimited_ number of workers to handle each key.
-	for wid := 2; ; wid++ {
-		log.Debug("Bitswap.ProvideWorker.Loop")
-
-		select {
-		case <-px.Closing():
-			return
-		case k, ok := <-bs.provideKeys:
-			if !ok {
-				log.Debug("provideKeys channel closed")
-				return
-			}
-			select {
-			case <-px.Closing():
-				return
-			case limit <- struct{}{}:
-				go limitedGoProvide(k, wid)
-			}
-		}
-	}
-}
-
 func (bs *Server) ReceiveMessage(ctx context.Context, p peer.ID, incoming message.BitSwapMessage) {
 	// This call records changes to wantlists, blocks received,
 	// and number of bytes transfered.
@@ -608,7 +453,13 @@ func (bs *Server) PeerDisconnected(p peer.ID) {
 	bs.engine.PeerDisconnected(p)
 }
 
-// Close is called to shutdown the Client
-func (bs *Server) Close() error {
-	return bs.process.Close()
+// Close is called to shut down the Server. It returns when all workers and
+// the decision engine have finished. Safe to call multiple times/concurrently.
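+//
+// An illustrative sketch (not part of this change; names assumed): a server
+// created with New is shut down by calling Close when no longer needed:
+//
+//	bswap := New(ctx, network, bstore)
+//	defer bswap.Close()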
+func (bs *Server) Close() {
+	bs.closeOnce.Do(func() {
+		close(bs.closing)
+		bs.cancel()
+	})
+	bs.engine.Close()
+	bs.waitWorkers.Wait()
 }
diff --git a/bitswap/testinstance/testinstance.go b/bitswap/testinstance/testinstance.go
index 5a052b831..f09831b65 100644
--- a/bitswap/testinstance/testinstance.go
+++ b/bitswap/testinstance/testinstance.go
@@ -8,6 +8,7 @@ import (
 	bsnet "github.com/ipfs/boxo/bitswap/network"
 	tn "github.com/ipfs/boxo/bitswap/testnet"
 	blockstore "github.com/ipfs/boxo/blockstore"
+	mockrouting "github.com/ipfs/boxo/routing/mock"
 	ds "github.com/ipfs/go-datastore"
 	delayed "github.com/ipfs/go-datastore/delayed"
 	ds_sync "github.com/ipfs/go-datastore/sync"
@@ -15,11 +16,12 @@ import (
 	tnet "github.com/libp2p/go-libp2p-testing/net"
 	p2ptestutil "github.com/libp2p/go-libp2p-testing/netutil"
 	peer "github.com/libp2p/go-libp2p/core/peer"
+	"github.com/libp2p/go-libp2p/core/routing"
 )
 
 // NewTestInstanceGenerator generates a new InstanceGenerator for the given
 // testnet
-func NewTestInstanceGenerator(net tn.Network, netOptions []bsnet.NetOpt, bsOptions []bitswap.Option) InstanceGenerator {
+func NewTestInstanceGenerator(net tn.Network, routing mockrouting.Server, netOptions []bsnet.NetOpt, bsOptions []bitswap.Option) InstanceGenerator {
 	ctx, cancel := context.WithCancel(context.Background())
 	return InstanceGenerator{
 		net:        net,
@@ -28,6 +30,7 @@ func NewTestInstanceGenerator(net tn.Network, netOptions []bsnet.NetOpt, bsOptio
 		cancel:     cancel,
 		bsOptions:  bsOptions,
 		netOptions: netOptions,
+		routing:    routing,
 	}
 }
 
@@ -39,6 +42,7 @@ type InstanceGenerator struct {
 	cancel     context.CancelFunc
 	bsOptions  []bitswap.Option
 	netOptions []bsnet.NetOpt
+	routing    mockrouting.Server
 }
 
 // Close closes the global context, shutting down all test instances
@@ -54,7 +58,7 @@ func (g *InstanceGenerator) Next() Instance {
 	if err != nil {
 		panic("FIXME") // TODO change signature
 	}
-	return NewInstance(g.ctx, g.net, p, g.netOptions, g.bsOptions)
+	return NewInstance(g.ctx, g.net, g.routing.Client(p), p, g.netOptions, g.bsOptions)
 }
 
 // Instances creates N test instances of bitswap + dependencies and connects
@@ -74,7 +78,7 @@ func ConnectInstances(instances []Instance) {
 	for i, inst := range instances {
 		for j := i + 1; j < len(instances); j++ {
 			oinst := instances[j]
-			err := inst.Adapter.ConnectTo(context.Background(), oinst.Peer)
+			err := inst.Adapter.Connect(context.Background(), peer.AddrInfo{ID: oinst.Identity.ID()})
 			if err != nil {
 				panic(err.Error())
 			}
@@ -84,18 +88,15 @@ func ConnectInstances(instances []Instance) {
 
 // Instance is a test instance of bitswap + dependencies for integration testing
 type Instance struct {
-	Peer            peer.ID
+	Identity        tnet.Identity
+	Datastore       ds.Batching
 	Exchange        *bitswap.Bitswap
-	blockstore      blockstore.Blockstore
+	Blockstore      blockstore.Blockstore
 	Adapter         bsnet.BitSwapNetwork
+	Routing         routing.Routing
 	blockstoreDelay delay.D
 }
 
-// Blockstore returns the block store for this test instance
-func (i *Instance) Blockstore() blockstore.Blockstore {
-	return i.blockstore
-}
-
 // SetBlockstoreLatency customizes the artificial delay on receiving blocks
 // from a blockstore test instance.
 func (i *Instance) SetBlockstoreLatency(t time.Duration) time.Duration {
@@ -107,26 +108,28 @@ func (i *Instance) SetBlockstoreLatency(t time.Duration) time.Duration {
 // NB: It's easy to make mistakes by providing the same peer ID to two different
 // instances. To safeguard, use the InstanceGenerator to generate instances. It's
 // just a much better idea.
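+//
+// An illustrative sketch (not part of this change; names assumed): instances
+// now receive a routing.Routing, typically a client from a shared
+// mockrouting.Server:
+//
+//	rs := mockrouting.NewServer()
+//	inst := NewInstance(ctx, net, rs.Client(p), p, nil, nil)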
-func NewInstance(ctx context.Context, net tn.Network, p tnet.Identity, netOptions []bsnet.NetOpt, bsOptions []bitswap.Option) Instance { +func NewInstance(ctx context.Context, net tn.Network, router routing.Routing, p tnet.Identity, netOptions []bsnet.NetOpt, bsOptions []bitswap.Option) Instance { bsdelay := delay.Fixed(0) adapter := net.Adapter(p, netOptions...) dstore := ds_sync.MutexWrap(delayed.New(ds.NewMapDatastore(), bsdelay)) + ds := ds_sync.MutexWrap(dstore) bstore, err := blockstore.CachedBlockstore(ctx, - blockstore.NewBlockstore(ds_sync.MutexWrap(dstore)), + blockstore.NewBlockstore(ds), blockstore.DefaultCacheOpts()) if err != nil { panic(err.Error()) // FIXME perhaps change signature and return error. } - bs := bitswap.New(ctx, adapter, bstore, bsOptions...) - + bs := bitswap.New(ctx, adapter, router, bstore, bsOptions...) return Instance{ + Datastore: ds, Adapter: adapter, - Peer: p.ID(), + Identity: p, Exchange: bs, - blockstore: bstore, + Routing: router, + Blockstore: bstore, blockstoreDelay: bsdelay, } } diff --git a/bitswap/testnet/network_test.go b/bitswap/testnet/network_test.go index 0947eff3e..2d45e09b1 100644 --- a/bitswap/testnet/network_test.go +++ b/bitswap/testnet/network_test.go @@ -8,7 +8,6 @@ import ( bsmsg "github.com/ipfs/boxo/bitswap/message" bsnet "github.com/ipfs/boxo/bitswap/network" - mockrouting "github.com/ipfs/boxo/routing/mock" blocks "github.com/ipfs/go-block-format" delay "github.com/ipfs/go-ipfs-delay" @@ -17,7 +16,7 @@ import ( ) func TestSendMessageAsyncButWaitForResponse(t *testing.T) { - net := VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0)) + net := VirtualNetwork(delay.Fixed(0)) responderPeer := tnet.RandIdentityOrFatal(t) waiter := net.Adapter(tnet.RandIdentityOrFatal(t)) responder := net.Adapter(responderPeer) diff --git a/bitswap/testnet/peernet.go b/bitswap/testnet/peernet.go index e4df19699..84fa70c6e 100644 --- a/bitswap/testnet/peernet.go +++ b/bitswap/testnet/peernet.go @@ -5,9 +5,6 @@ import ( bsnet "github.com/ipfs/boxo/bitswap/network" - mockrouting "github.com/ipfs/boxo/routing/mock" - ds "github.com/ipfs/go-datastore" - tnet "github.com/libp2p/go-libp2p-testing/net" "github.com/libp2p/go-libp2p/core/peer" mockpeernet "github.com/libp2p/go-libp2p/p2p/net/mock" @@ -15,12 +12,11 @@ import ( type peernet struct { mockpeernet.Mocknet - routingserver mockrouting.Server } // StreamNet is a testnet that uses libp2p's MockNet -func StreamNet(ctx context.Context, net mockpeernet.Mocknet, rs mockrouting.Server) (Network, error) { - return &peernet{net, rs}, nil +func StreamNet(ctx context.Context, net mockpeernet.Mocknet) (Network, error) { + return &peernet{net}, nil } func (pn *peernet) Adapter(p tnet.Identity, opts ...bsnet.NetOpt) bsnet.BitSwapNetwork { @@ -28,8 +24,8 @@ func (pn *peernet) Adapter(p tnet.Identity, opts ...bsnet.NetOpt) bsnet.BitSwapN if err != nil { panic(err.Error()) } - routing := pn.routingserver.ClientWithDatastore(context.TODO(), p, ds.NewMapDatastore()) - return bsnet.NewFromIpfsHost(client, routing, opts...) + + return bsnet.NewFromIpfsHost(client, opts...) 
} func (pn *peernet) HasPeer(p peer.ID) bool { diff --git a/bitswap/testnet/virtual.go b/bitswap/testnet/virtual.go index 914044aed..53e56d67d 100644 --- a/bitswap/testnet/virtual.go +++ b/bitswap/testnet/virtual.go @@ -8,30 +8,25 @@ import ( "sync/atomic" "time" + "github.com/gammazero/deque" bsmsg "github.com/ipfs/boxo/bitswap/message" bsnet "github.com/ipfs/boxo/bitswap/network" - - mockrouting "github.com/ipfs/boxo/routing/mock" - cid "github.com/ipfs/go-cid" delay "github.com/ipfs/go-ipfs-delay" - tnet "github.com/libp2p/go-libp2p-testing/net" "github.com/libp2p/go-libp2p/core/connmgr" "github.com/libp2p/go-libp2p/core/peer" protocol "github.com/libp2p/go-libp2p/core/protocol" - "github.com/libp2p/go-libp2p/core/routing" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" "github.com/libp2p/go-libp2p/p2p/protocol/ping" ) // VirtualNetwork generates a new testnet instance - a fake network that // is used to simulate sending messages. -func VirtualNetwork(rs mockrouting.Server, d delay.D) Network { +func VirtualNetwork(d delay.D) Network { return &network{ latencies: make(map[peer.ID]map[peer.ID]time.Duration), clients: make(map[peer.ID]*receiverQueue), delay: d, - routingserver: rs, isRateLimited: false, rateLimitGenerator: nil, conns: make(map[string]struct{}), @@ -45,13 +40,12 @@ type RateLimitGenerator interface { // RateLimitedVirtualNetwork generates a testnet instance where nodes are rate // limited in the upload/download speed. -func RateLimitedVirtualNetwork(rs mockrouting.Server, d delay.D, rateLimitGenerator RateLimitGenerator) Network { +func RateLimitedVirtualNetwork(d delay.D, rateLimitGenerator RateLimitGenerator) Network { return &network{ latencies: make(map[peer.ID]map[peer.ID]time.Duration), rateLimiters: make(map[peer.ID]map[peer.ID]*mocknet.RateLimiter), clients: make(map[peer.ID]*receiverQueue), delay: d, - routingserver: rs, isRateLimited: true, rateLimitGenerator: rateLimitGenerator, conns: make(map[string]struct{}), @@ -63,7 +57,6 @@ type network struct { latencies map[peer.ID]map[peer.ID]time.Duration rateLimiters map[peer.ID]map[peer.ID]*mocknet.RateLimiter clients map[peer.ID]*receiverQueue - routingserver mockrouting.Server delay delay.D isRateLimited bool rateLimitGenerator RateLimitGenerator @@ -81,7 +74,7 @@ type message struct { // for type receiverQueue struct { receiver *networkClient - queue []*message + queue deque.Deque[*message] active bool lk sync.Mutex } @@ -105,7 +98,6 @@ func (n *network) Adapter(p tnet.Identity, opts ...bsnet.NetOpt) bsnet.BitSwapNe client := &networkClient{ local: p.ID(), network: n, - routing: n.routingserver.Client(p), supportedProtocols: s.SupportedProtocols, } n.clients[p.ID()] = &receiverQueue{receiver: client} @@ -192,7 +184,6 @@ type networkClient struct { local peer.ID receivers []bsnet.Receiver network *network - routing routing.Routing supportedProtocols []protocol.ID } @@ -253,27 +244,6 @@ func (nc *networkClient) Stats() bsnet.Stats { } } -// FindProvidersAsync returns a channel of providers for the given key. -func (nc *networkClient) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.ID { - // NB: this function duplicates the AddrInfo -> ID transformation in the - // bitswap network adapter. Not to worry. This network client will be - // deprecated once the ipfsnet.Mock is added. The code below is only - // temporary. 
- - out := make(chan peer.ID) - go func() { - defer close(out) - providers := nc.routing.FindProvidersAsync(ctx, k, max) - for info := range providers { - select { - case <-ctx.Done(): - case out <- info.ID: - } - } - }() - return out -} - func (nc *networkClient) ConnectionManager() connmgr.ConnManager { return &connmgr.NullConnMgr{} } @@ -322,11 +292,6 @@ func (nc *networkClient) NewMessageSender(ctx context.Context, p peer.ID, opts * }, nil } -// Provide provides the key to the network. -func (nc *networkClient) Provide(ctx context.Context, k cid.Cid) error { - return nc.routing.Provide(ctx, k, true) -} - func (nc *networkClient) Start(r ...bsnet.Receiver) { nc.receivers = r } @@ -334,15 +299,15 @@ func (nc *networkClient) Start(r ...bsnet.Receiver) { func (nc *networkClient) Stop() { } -func (nc *networkClient) ConnectTo(_ context.Context, p peer.ID) error { +func (nc *networkClient) Connect(_ context.Context, p peer.AddrInfo) error { nc.network.mu.Lock() - otherClient, ok := nc.network.clients[p] + otherClient, ok := nc.network.clients[p.ID] if !ok { nc.network.mu.Unlock() return errors.New("no such peer in network") } - tag := tagForPeers(nc.local, p) + tag := tagForPeers(nc.local, p.ID) if _, ok := nc.network.conns[tag]; ok { nc.network.mu.Unlock() // log.Warning("ALREADY CONNECTED TO PEER (is this a reconnect? test lib needs fixing)") @@ -352,7 +317,7 @@ func (nc *networkClient) ConnectTo(_ context.Context, p peer.ID) error { nc.network.mu.Unlock() otherClient.receiver.PeerConnected(nc.local) - nc.PeerConnected(p) + nc.PeerConnected(p.ID) return nil } @@ -380,7 +345,7 @@ func (nc *networkClient) DisconnectFrom(_ context.Context, p peer.ID) error { func (rq *receiverQueue) enqueue(m *message) { rq.lk.Lock() defer rq.lk.Unlock() - rq.queue = append(rq.queue, m) + rq.queue.PushBack(m) if !rq.active { rq.active = true go rq.process() @@ -388,29 +353,29 @@ func (rq *receiverQueue) enqueue(m *message) { } func (rq *receiverQueue) Swap(i, j int) { - rq.queue[i], rq.queue[j] = rq.queue[j], rq.queue[i] + rq.queue.Swap(i, j) } func (rq *receiverQueue) Len() int { - return len(rq.queue) + return rq.queue.Len() } func (rq *receiverQueue) Less(i, j int) bool { - return rq.queue[i].shouldSend.UnixNano() < rq.queue[j].shouldSend.UnixNano() + return rq.queue.At(i).shouldSend.UnixNano() < rq.queue.At(j).shouldSend.UnixNano() } func (rq *receiverQueue) process() { for { rq.lk.Lock() - sort.Sort(rq) - if len(rq.queue) == 0 { + if rq.queue.Len() == 0 { rq.active = false rq.lk.Unlock() return } - m := rq.queue[0] + sort.Sort(rq) + m := rq.queue.Front() if time.Until(m.shouldSend).Seconds() < 0.1 { - rq.queue = rq.queue[1:] + rq.queue.PopFront() rq.lk.Unlock() time.Sleep(time.Until(m.shouldSend)) atomic.AddUint64(&rq.receiver.stats.MessagesRecvd, 1) diff --git a/blockservice/blockservice.go b/blockservice/blockservice.go index 43e474cb4..3b02ca4dd 100644 --- a/blockservice/blockservice.go +++ b/blockservice/blockservice.go @@ -85,10 +85,10 @@ type blockService struct { type Option func(*blockService) // WriteThrough disable cache checks for writes and make them go straight to -// the blockstore. -func WriteThrough() Option { +// the blockstore, when enabled. 
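+//
+// An illustrative sketch (not part of this change; names assumed): enabling
+// write-through when constructing a blockservice:
+//
+//	bserv := New(bstore, exch, WriteThrough(true))
-func WriteThrough() Option {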
+func WriteThrough(enabled bool) Option { return func(bs *blockService) { - bs.checkFirst = false + bs.checkFirst = !enabled } } diff --git a/blockservice/blockservice_test.go b/blockservice/blockservice_test.go index f1a64de80..97680a66a 100644 --- a/blockservice/blockservice_test.go +++ b/blockservice/blockservice_test.go @@ -29,7 +29,7 @@ func TestWriteThroughWorks(t *testing.T) { } exchbstore := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) exch := offline.Exchange(exchbstore) - bserv := New(bstore, exch, WriteThrough()) + bserv := New(bstore, exch, WriteThrough(true)) block := random.BlocksOfSize(1, blockSize)[0] @@ -63,7 +63,7 @@ func TestExchangeWrite(t *testing.T) { offline.Exchange(exchbstore), 0, } - bserv := New(bstore, exch, WriteThrough()) + bserv := New(bstore, exch, WriteThrough(true)) for name, fetcher := range map[string]BlockGetter{ "blockservice": bserv, @@ -137,7 +137,7 @@ func TestLazySessionInitialization(t *testing.T) { session := offline.Exchange(bstore2) exch := offline.Exchange(bstore3) sessionExch := &fakeSessionExchange{Interface: exch, session: session} - bservSessEx := New(bstore, sessionExch, WriteThrough()) + bservSessEx := New(bstore, sessionExch, WriteThrough(true)) blks := random.BlocksOfSize(2, blockSize) block := blks[0] @@ -239,7 +239,7 @@ func TestNilExchange(t *testing.T) { block := random.BlocksOfSize(1, blockSize)[0] bs := blockstore.NewBlockstore(dssync.MutexWrap(ds.NewMapDatastore())) - bserv := New(bs, nil, WriteThrough()) + bserv := New(bs, nil, WriteThrough(true)) sess := NewSession(ctx, bserv) _, err := sess.GetBlock(ctx, block.Cid()) if !ipld.IsNotFound(err) { diff --git a/blockservice/test/mock.go b/blockservice/test/mock.go index e32b10b99..77eeed127 100644 --- a/blockservice/test/mock.go +++ b/blockservice/test/mock.go @@ -10,14 +10,15 @@ import ( // Mocks returns |n| connected mock Blockservices func Mocks(n int, opts ...blockservice.Option) []blockservice.BlockService { - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0)) - sg := testinstance.NewTestInstanceGenerator(net, nil, nil) - + net := tn.VirtualNetwork(delay.Fixed(0)) + routing := mockrouting.NewServer() + sg := testinstance.NewTestInstanceGenerator(net, routing, nil, nil) instances := sg.Instances(n) var servs []blockservice.BlockService for _, i := range instances { - servs = append(servs, blockservice.New(i.Blockstore(), i.Exchange, opts...)) + servs = append(servs, blockservice.New(i.Blockstore, + i.Exchange, opts...)) } return servs } diff --git a/blockstore/blockstore.go b/blockstore/blockstore.go index 27260e5d8..368670d3d 100644 --- a/blockstore/blockstore.go +++ b/blockstore/blockstore.go @@ -122,11 +122,11 @@ type Option struct { } // WriteThrough skips checking if the blockstore already has a block before -// writing it. -func WriteThrough() Option { +// writing it, when enabled. 
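+//
+// An illustrative sketch (not part of this change; names assumed):
+// constructing a blockstore that skips the pre-write check:
+//
+//	bs := NewBlockstore(datastore, WriteThrough(true))
-func WriteThrough() Option {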
+func WriteThrough(enabled bool) Option { return Option{ func(bs *blockstore) { - bs.writeThrough = true + bs.writeThrough = enabled }, } } diff --git a/bootstrap/bootstrap.go b/bootstrap/bootstrap.go index 303405f14..eea833be3 100644 --- a/bootstrap/bootstrap.go +++ b/bootstrap/bootstrap.go @@ -10,9 +10,6 @@ import ( "time" logging "github.com/ipfs/go-log/v2" - "github.com/jbenet/goprocess" - goprocessctx "github.com/jbenet/goprocess/context" - periodicproc "github.com/jbenet/goprocess/periodic" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" @@ -112,78 +109,105 @@ func (cfg *BootstrapConfig) SetBackupPeers(load func(context.Context) []peer.Add // connections to well-known bootstrap peers. It also kicks off subsystem // bootstrapping (i.e. routing). func Bootstrap(id peer.ID, host host.Host, rt routing.Routing, cfg BootstrapConfig) (io.Closer, error) { - // make a signal to wait for one bootstrap round to complete. - doneWithRound := make(chan struct{}) - if len(cfg.BootstrapPeers()) == 0 { // We *need* to bootstrap but we have no bootstrap peers // configured *at all*, inform the user. log.Warn("no bootstrap nodes configured: go-ipfs may have difficulty connecting to the network") } - // the periodic bootstrap function -- the connection supervisor - periodic := func(worker goprocess.Process) { - ctx := goprocessctx.OnClosingContext(worker) + ctx, cancel := context.WithCancel(context.Background()) - if err := bootstrapRound(ctx, host, cfg); err != nil { - log.Debugf("%s bootstrap error: %s", id, err) + // Signal when first bootstrap round is complete, started independent of ticker. + doneWithRound := make(chan struct{}) + + go func() { + // the periodic bootstrap function -- the connection supervisor + periodic := func() { + if err := bootstrapRound(ctx, host, cfg); err != nil { + log.Debugf("%s bootstrap error: %s", id, err) + } } - // Exit the first call (triggered independently by `proc.Go`, not `Tick`) - // only after being done with the *single* Routing.Bootstrap call. Following - // periodic calls (`Tick`) will not block on this. + ticker := time.NewTicker(cfg.Period) + defer ticker.Stop() + + // Run first round independent of ticker. + periodic() <-doneWithRound - } + if ctx.Err() != nil { + return + } - // kick off the node's periodic bootstrapping - proc := periodicproc.Tick(cfg.Period, periodic) - proc.Go(periodic) // run one right now. + for { + select { + case <-ticker.C: + periodic() + case <-ctx.Done(): + return + } + } + }() // kick off Routing.Bootstrap if rt != nil { - ctx := goprocessctx.OnClosingContext(proc) if err := rt.Bootstrap(ctx); err != nil { - proc.Close() + cancel() + close(doneWithRound) return nil, err } } - doneWithRound <- struct{}{} - close(doneWithRound) // it no longer blocks periodic - // If loadBackupBootstrapPeers is not nil then saveBackupBootstrapPeers // must also not be nil. if cfg.loadBackupBootstrapPeers != nil { - startSavePeersAsTemporaryBootstrapProc(cfg, host, proc) + doneWithRound <- struct{}{} // wait for first bootstrap + startSavePeersAsTemporaryBootstrapProc(ctx, cfg, host) } - return proc, nil + return &bootstrapCloser{ + cancel: cancel, + }, nil +} + +type bootstrapCloser struct { + cancel context.CancelFunc +} + +func (bsc *bootstrapCloser) Close() error { + bsc.cancel() + return nil } // Aside of the main bootstrap process we also run a secondary one that saves // connected peers as a backup measure if we can't connect to the official // bootstrap ones. 
These peers will serve as *temporary* bootstrap nodes. -func startSavePeersAsTemporaryBootstrapProc(cfg BootstrapConfig, host host.Host, bootstrapProc goprocess.Process) { - savePeersFn := func(worker goprocess.Process) { - ctx := goprocessctx.OnClosingContext(worker) +func startSavePeersAsTemporaryBootstrapProc(ctx context.Context, cfg BootstrapConfig, host host.Host) { + go func() { + periodic := func() { + if err := saveConnectedPeersAsTemporaryBootstrap(ctx, host, cfg); err != nil { + log.Debugf("saveConnectedPeersAsTemporaryBootstrap error: %s", err) + } + } - if err := saveConnectedPeersAsTemporaryBootstrap(ctx, host, cfg); err != nil { - log.Debugf("saveConnectedPeersAsTemporaryBootstrap error: %s", err) + ticker := time.NewTicker(cfg.BackupBootstrapInterval) + defer ticker.Stop() + + // Run the first round now (after the first bootstrap process has + // finished) as the SavePeersPeriod can be much longer than bootstrap. + periodic() + if ctx.Err() != nil { + return } - } - savePeersProc := periodicproc.Tick(cfg.BackupBootstrapInterval, savePeersFn) - // When the main bootstrap process ends also terminate the 'save connected - // peers' ones. Coupling the two seems the easiest way to handle this backup - // process without additional complexity. - go func() { - <-bootstrapProc.Closing() - savePeersProc.Close() + for { + select { + case <-ticker.C: + periodic() + case <-ctx.Done(): + return + } + } }() - - // Run the first round now (after the first bootstrap process has finished) - // as the SavePeersPeriod can be much longer than bootstrap. - savePeersProc.Go(savePeersFn) } func saveConnectedPeersAsTemporaryBootstrap(ctx context.Context, host host.Host, cfg BootstrapConfig) error { @@ -304,15 +328,19 @@ func peersConnect(ctx context.Context, ph host.Host, availablePeers []peer.AddrI ctx, cancel := context.WithCancel(ctx) defer cancel() go func() { + timer := time.NewTimer(time.Second) + defer timer.Stop() + for { select { case <-ctx.Done(): return - case <-time.After(1 * time.Second): + case <-timer.C: if int(atomic.LoadUint64(&connected)) >= needed { cancel() return } + timer.Reset(time.Second) } } }() diff --git a/chunker/parse_test.go b/chunker/parse_test.go index 6809476e9..108b96729 100644 --- a/chunker/parse_test.go +++ b/chunker/parse_test.go @@ -17,7 +17,7 @@ func TestParseRabin(t *testing.T) { _, err := FromString(r, "rabin-18-25-32") if err != nil { - t.Errorf(err.Error()) + t.Error(err.Error()) } _, err = FromString(r, "rabin-15-23-31") diff --git a/docs/tracing.md b/docs/tracing.md index 868b68d95..a620fa655 100644 --- a/docs/tracing.md +++ b/docs/tracing.md @@ -54,7 +54,7 @@ Default: `"grpc"` ### `Zipkin Exporter` -See [Zipkin Exporter](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md#zipkin-exporter). +See [Zipkin Exporter](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/configuration/sdk-environment-variables.md#zipkin-exporter). ### `File Exporter` @@ -66,7 +66,7 @@ Default: `"$PWD/traces.json"` ### `OTEL_PROPAGATORS` -See [General SDK Configuration](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md#general-sdk-configuration). +See [General SDK Configuration](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/configuration/sdk-environment-variables.md#general-sdk-configuration). ## Using Jaeger UI @@ -134,6 +134,6 @@ the complete trace of this request. 
[Open Telemetry]: https://opentelemetry.io/ [opentelemetry-go]: https://github.com/open-telemetry/opentelemetry-go [Trace Context]: https://www.w3.org/TR/trace-context -[OpenTelemetry Environment Variable Specification]: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md +[OpenTelemetry Environment Variable Specification]: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/configuration/sdk-environment-variables.md [OpenTelemetry Protocol Exporter]: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md [Jaeger UI]: https://github.com/jaegertracing/jaeger-ui diff --git a/examples/README.md b/examples/README.md index 19313caf4..aeee689eb 100644 --- a/examples/README.md +++ b/examples/README.md @@ -26,7 +26,7 @@ Once you have your example finished, do not forget to run `go mod tidy` and addi ## Examples and Tutorials -- [Transfering UnixFS file data with Bitswap](./bitswap-transfer) +- [Transferring UnixFS file data with Bitswap](./bitswap-transfer) - [Gateway backed by a local blockstore in form of a CAR file](./gateway/car-file) - [Gateway backed by a remote (HTTP) blockstore and IPNS resolver](./gateway/proxy-blocks) - [Gateway backed by a remote (HTTP) CAR Gateway](./gateway/proxy-car) diff --git a/examples/bitswap-transfer/README.md b/examples/bitswap-transfer/README.md index 55d69e9d6..3a42b3beb 100644 --- a/examples/bitswap-transfer/README.md +++ b/examples/bitswap-transfer/README.md @@ -1,4 +1,4 @@ -# Transfering UnixFS file with Bitswap +# Transferring UnixFS file with Bitswap This is an example that quickly shows how to use IPFS tooling to move around a file. diff --git a/examples/bitswap-transfer/main.go b/examples/bitswap-transfer/main.go index 921dca3fa..fc2d5ded3 100644 --- a/examples/bitswap-transfer/main.go +++ b/examples/bitswap-transfer/main.go @@ -32,7 +32,6 @@ import ( unixfile "github.com/ipfs/boxo/ipld/unixfs/file" "github.com/ipfs/boxo/ipld/unixfs/importer/balanced" uih "github.com/ipfs/boxo/ipld/unixfs/importer/helpers" - routinghelpers "github.com/libp2p/go-libp2p-routing-helpers" bsclient "github.com/ipfs/boxo/bitswap/client" bsnet "github.com/ipfs/boxo/bitswap/network" @@ -178,15 +177,15 @@ func startDataServer(ctx context.Context, h host.Host) (cid.Cid, *bsserver.Serve // Start listening on the Bitswap protocol // For this example we're not leveraging any content routing (DHT, IPNI, delegated routing requests, etc.) 
as we know the peer we are fetching from - n := bsnet.NewFromIpfsHost(h, routinghelpers.Null{}) + n := bsnet.NewFromIpfsHost(h) bswap := bsserver.New(ctx, n, bs) n.Start(bswap) return nd.Cid(), bswap, nil } func runClient(ctx context.Context, h host.Host, c cid.Cid, targetPeer string) ([]byte, error) { - n := bsnet.NewFromIpfsHost(h, routinghelpers.Null{}) - bswap := bsclient.New(ctx, n, blockstore.NewBlockstore(datastore.NewNullDatastore())) + n := bsnet.NewFromIpfsHost(h) + bswap := bsclient.New(ctx, n, nil, blockstore.NewBlockstore(datastore.NewNullDatastore())) n.Start(bswap) defer bswap.Close() diff --git a/examples/gateway/common/tracing.go b/examples/gateway/common/tracing.go index 403c8cf50..e7255520c 100644 --- a/examples/gateway/common/tracing.go +++ b/examples/gateway/common/tracing.go @@ -28,7 +28,7 @@ func SetupTracing(ctx context.Context, serviceName string) (*trace.TracerProvide // using autoprop.NewTextMapPropagator, we ensure the value of the environmental // variable OTEL_PROPAGATORS is respected, if set. By default, Trace Context // and Baggage are used. More details on: - // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/configuration/sdk-environment-variables.md otel.SetTextMapPropagator(autoprop.NewTextMapPropagator()) return tp, nil diff --git a/examples/go.mod b/examples/go.mod index fd77bc07e..b5856dee3 100644 --- a/examples/go.mod +++ b/examples/go.mod @@ -1,36 +1,37 @@ module github.com/ipfs/boxo/examples -go 1.22 +go 1.22.0 + +toolchain go1.22.8 require ( - github.com/ipfs/boxo v0.22.0 + github.com/ipfs/boxo v0.24.0 github.com/ipfs/go-block-format v0.2.0 github.com/ipfs/go-cid v0.4.1 github.com/ipfs/go-datastore v0.6.0 - github.com/ipld/go-car/v2 v2.13.1 + github.com/ipld/go-car/v2 v2.14.2 github.com/ipld/go-ipld-prime v0.21.0 - github.com/libp2p/go-libp2p v0.36.3 - github.com/libp2p/go-libp2p-routing-helpers v0.7.3 + github.com/libp2p/go-libp2p v0.37.2 github.com/multiformats/go-multiaddr v0.13.0 github.com/multiformats/go-multicodec v0.9.0 - github.com/prometheus/client_golang v1.19.1 + github.com/prometheus/client_golang v1.20.5 github.com/stretchr/testify v1.9.0 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 go.opentelemetry.io/contrib/propagators/autoprop v0.46.1 - go.opentelemetry.io/otel v1.27.0 - go.opentelemetry.io/otel/sdk v1.27.0 + go.opentelemetry.io/otel v1.31.0 + go.opentelemetry.io/otel/sdk v1.31.0 ) require ( github.com/Jorropo/jsync v1.0.1 // indirect - github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect + github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668 // indirect + github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf // indirect github.com/cskr/pubsub v1.0.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect @@ -41,17 +42,19 @@ require ( github.com/felixge/httpsnoop v1.0.4 // indirect 
diff --git a/examples/go.mod b/examples/go.mod
index fd77bc07e..b5856dee3 100644
--- a/examples/go.mod
+++ b/examples/go.mod
@@ -1,36 +1,37 @@
 module github.com/ipfs/boxo/examples
 
-go 1.22
+go 1.22.0
+
+toolchain go1.22.8
 
 require (
-	github.com/ipfs/boxo v0.22.0
+	github.com/ipfs/boxo v0.24.0
 	github.com/ipfs/go-block-format v0.2.0
 	github.com/ipfs/go-cid v0.4.1
 	github.com/ipfs/go-datastore v0.6.0
-	github.com/ipld/go-car/v2 v2.13.1
+	github.com/ipld/go-car/v2 v2.14.2
 	github.com/ipld/go-ipld-prime v0.21.0
-	github.com/libp2p/go-libp2p v0.36.3
-	github.com/libp2p/go-libp2p-routing-helpers v0.7.3
+	github.com/libp2p/go-libp2p v0.37.2
 	github.com/multiformats/go-multiaddr v0.13.0
 	github.com/multiformats/go-multicodec v0.9.0
-	github.com/prometheus/client_golang v1.19.1
+	github.com/prometheus/client_golang v1.20.5
 	github.com/stretchr/testify v1.9.0
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0
 	go.opentelemetry.io/contrib/propagators/autoprop v0.46.1
-	go.opentelemetry.io/otel v1.27.0
-	go.opentelemetry.io/otel/sdk v1.27.0
+	go.opentelemetry.io/otel v1.31.0
+	go.opentelemetry.io/otel/sdk v1.31.0
 )
 
 require (
 	github.com/Jorropo/jsync v1.0.1 // indirect
-	github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect
+	github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect
 	github.com/benbjohnson/clock v1.3.5 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
 	github.com/containerd/cgroups v1.1.0 // indirect
 	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
-	github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668 // indirect
+	github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf // indirect
 	github.com/cskr/pubsub v1.0.2 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
@@ -41,17 +42,19 @@ require (
 	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/flynn/noise v1.1.0 // indirect
 	github.com/francoispqt/gojay v1.2.13 // indirect
-	github.com/gabriel-vasile/mimetype v1.4.4 // indirect
+	github.com/gabriel-vasile/mimetype v1.4.6 // indirect
+	github.com/gammazero/chanqueue v1.0.0 // indirect
+	github.com/gammazero/deque v1.0.0 // indirect
 	github.com/go-logr/logr v1.4.2 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
 	github.com/godbus/dbus/v5 v5.1.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/google/gopacket v1.1.19 // indirect
-	github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect
+	github.com/google/pprof v0.0.0-20241017200806-017d972448fc // indirect
 	github.com/google/uuid v1.6.0 // indirect
 	github.com/gorilla/websocket v1.5.3 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/hashicorp/golang-lru v1.0.2 // indirect
@@ -65,7 +68,7 @@ require (
 	github.com/ipfs/go-ipfs-ds-help v1.1.1 // indirect
 	github.com/ipfs/go-ipfs-exchange-interface v0.2.1 // indirect
 	github.com/ipfs/go-ipfs-pq v0.0.3 // indirect
-	github.com/ipfs/go-ipfs-redirects-file v0.1.1 // indirect
+	github.com/ipfs/go-ipfs-redirects-file v0.1.2 // indirect
 	github.com/ipfs/go-ipfs-util v0.0.3 // indirect
 	github.com/ipfs/go-ipld-cbor v0.1.0 // indirect
 	github.com/ipfs/go-ipld-format v0.6.0 // indirect
@@ -75,24 +78,25 @@ require (
 	github.com/ipfs/go-merkledag v0.11.0 // indirect
 	github.com/ipfs/go-metrics-interface v0.0.1 // indirect
 	github.com/ipfs/go-peertaskqueue v0.8.1 // indirect
-	github.com/ipfs/go-unixfsnode v1.9.0 // indirect
+	github.com/ipfs/go-unixfsnode v1.9.2 // indirect
 	github.com/ipfs/go-verifcid v0.0.3 // indirect
 	github.com/ipld/go-car v0.6.2 // indirect
 	github.com/ipld/go-codec-dagpb v1.6.0 // indirect
 	github.com/jackpal/go-nat-pmp v1.0.2 // indirect
 	github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
 	github.com/jbenet/goprocess v0.1.4 // indirect
-	github.com/klauspost/compress v1.17.9 // indirect
+	github.com/klauspost/compress v1.17.11 // indirect
 	github.com/klauspost/cpuid/v2 v2.2.8 // indirect
 	github.com/koron/go-ssdp v0.0.4 // indirect
 	github.com/libp2p/go-buffer-pool v0.1.0 // indirect
 	github.com/libp2p/go-cidranger v1.1.0 // indirect
 	github.com/libp2p/go-doh-resolver v0.4.0 // indirect
-	github.com/libp2p/go-flow-metrics v0.1.0 // indirect
+	github.com/libp2p/go-flow-metrics v0.2.0 // indirect
 	github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
-	github.com/libp2p/go-libp2p-kad-dht v0.25.2 // indirect
-	github.com/libp2p/go-libp2p-kbucket v0.6.3 // indirect
+	github.com/libp2p/go-libp2p-kad-dht v0.27.0 // indirect
+	github.com/libp2p/go-libp2p-kbucket v0.6.4 // indirect
 	github.com/libp2p/go-libp2p-record v0.2.0 // indirect
+	github.com/libp2p/go-libp2p-routing-helpers v0.7.4 // indirect
 	github.com/libp2p/go-msgio v0.3.0 // indirect
 	github.com/libp2p/go-nat v0.2.0 // indirect
 	github.com/libp2p/go-netroute v0.2.1 // indirect
@@ -100,53 +104,53 @@ require (
 	github.com/libp2p/go-yamux/v4 v4.0.1 // indirect
 	github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
 	github.com/mattn/go-isatty v0.0.20 // indirect
-	github.com/miekg/dns v1.1.61 // indirect
+	github.com/miekg/dns v1.1.62 // indirect
 	github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
 	github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
 	github.com/minio/sha256-simd v1.0.1 // indirect
 	github.com/mr-tron/base58 v1.2.0 // indirect
 	github.com/multiformats/go-base32 v0.1.0 // indirect
 	github.com/multiformats/go-base36 v0.2.0 // indirect
-	github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect
+	github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect
 	github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
 	github.com/multiformats/go-multibase v0.2.0 // indirect
 	github.com/multiformats/go-multihash v0.2.3 // indirect
-	github.com/multiformats/go-multistream v0.5.0 // indirect
+	github.com/multiformats/go-multistream v0.6.0 // indirect
 	github.com/multiformats/go-varint v0.0.7 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
-	github.com/onsi/ginkgo/v2 v2.19.1 // indirect
+	github.com/onsi/ginkgo/v2 v2.20.2 // indirect
 	github.com/opencontainers/runtime-spec v1.2.0 // indirect
 	github.com/opentracing/opentracing-go v1.2.0 // indirect
 	github.com/openzipkin/zipkin-go v0.4.3 // indirect
 	github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
 	github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 // indirect
-	github.com/pion/datachannel v1.5.8 // indirect
+	github.com/pion/datachannel v1.5.9 // indirect
 	github.com/pion/dtls/v2 v2.2.12 // indirect
-	github.com/pion/ice/v2 v2.3.34 // indirect
-	github.com/pion/interceptor v0.1.29 // indirect
+	github.com/pion/ice/v2 v2.3.36 // indirect
+	github.com/pion/interceptor v0.1.37 // indirect
 	github.com/pion/logging v0.2.2 // indirect
 	github.com/pion/mdns v0.0.12 // indirect
 	github.com/pion/randutil v0.1.0 // indirect
 	github.com/pion/rtcp v1.2.14 // indirect
-	github.com/pion/rtp v1.8.8 // indirect
-	github.com/pion/sctp v1.8.20 // indirect
+	github.com/pion/rtp v1.8.9 // indirect
+	github.com/pion/sctp v1.8.33 // indirect
 	github.com/pion/sdp/v3 v3.0.9 // indirect
 	github.com/pion/srtp/v2 v2.0.20 // indirect
 	github.com/pion/stun v0.6.1 // indirect
 	github.com/pion/transport/v2 v2.2.10 // indirect
 	github.com/pion/turn/v2 v2.1.6 // indirect
-	github.com/pion/webrtc/v3 v3.3.0 // indirect
+	github.com/pion/webrtc/v3 v3.3.4 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/polydawn/refmt v0.89.0 // indirect
 	github.com/prometheus/client_model v0.6.1 // indirect
-	github.com/prometheus/common v0.55.0 // indirect
+	github.com/prometheus/common v0.60.0 // indirect
 	github.com/prometheus/procfs v0.15.1 // indirect
-	github.com/quic-go/qpack v0.4.0 // indirect
-	github.com/quic-go/quic-go v0.45.2 // indirect
-	github.com/quic-go/webtransport-go v0.8.0 // indirect
+	github.com/quic-go/qpack v0.5.1 // indirect
+	github.com/quic-go/quic-go v0.48.2 // indirect
+	github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 // indirect
 	github.com/raulk/go-watchdog v1.3.0 // indirect
-	github.com/samber/lo v1.39.0 // indirect
+	github.com/samber/lo v1.47.0 // indirect
 	github.com/spaolacci/murmur3 v1.1.0 // indirect
 	github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb // indirect
 	github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc // indirect
@@ -154,40 +158,40 @@ require (
 	github.com/whyrusleeping/cbor-gen v0.1.2 // indirect
 	github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f // indirect
 	github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect
-	github.com/wlynxg/anet v0.0.3 // indirect
+	github.com/wlynxg/anet v0.0.5 // indirect
 	go.opencensus.io v0.24.0 // indirect
 	go.opentelemetry.io/contrib/propagators/aws v1.21.1 // indirect
 	go.opentelemetry.io/contrib/propagators/b3 v1.21.1 // indirect
 	go.opentelemetry.io/contrib/propagators/jaeger v1.21.1 // indirect
 	go.opentelemetry.io/contrib/propagators/ot v1.21.1 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 // indirect
-	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 // indirect
-	go.opentelemetry.io/otel/exporters/zipkin v1.27.0 // indirect
-	go.opentelemetry.io/otel/metric v1.27.0 // indirect
-	go.opentelemetry.io/otel/trace v1.27.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 // indirect
+	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 // indirect
+	go.opentelemetry.io/otel/exporters/zipkin v1.31.0 // indirect
+	go.opentelemetry.io/otel/metric v1.31.0 // indirect
+	go.opentelemetry.io/otel/trace v1.31.0 // indirect
 	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
 	go.uber.org/atomic v1.11.0 // indirect
-	go.uber.org/dig v1.17.1 // indirect
-	go.uber.org/fx v1.22.1 // indirect
-	go.uber.org/mock v0.4.0 // indirect
+	go.uber.org/dig v1.18.0 // indirect
+	go.uber.org/fx v1.23.0 // indirect
+	go.uber.org/mock v0.5.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.27.0 // indirect
-	golang.org/x/crypto v0.25.0 // indirect
-	golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
-	golang.org/x/mod v0.19.0 // indirect
-	golang.org/x/net v0.27.0 // indirect
-	golang.org/x/sync v0.7.0 // indirect
-	golang.org/x/sys v0.22.0 // indirect
-	golang.org/x/text v0.16.0 // indirect
-	golang.org/x/tools v0.23.0 // indirect
-	golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
+	golang.org/x/crypto v0.28.0 // indirect
+	golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c // indirect
+	golang.org/x/mod v0.21.0 // indirect
+	golang.org/x/net v0.30.0 // indirect
+	golang.org/x/sync v0.8.0 // indirect
+	golang.org/x/sys v0.26.0 // indirect
+	golang.org/x/text v0.19.0 // indirect
+	golang.org/x/tools v0.26.0 // indirect
+	golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
 	gonum.org/v1/gonum v0.15.0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4 // indirect
-	google.golang.org/grpc v1.64.0 // indirect
-	google.golang.org/protobuf v1.34.2 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 // indirect
+	google.golang.org/grpc v1.67.1 // indirect
+	google.golang.org/protobuf v1.35.1 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	lukechampine.com/blake3 v1.3.0 // indirect
 )
diff --git a/examples/go.sum b/examples/go.sum
index f8d2600ad..fce1aab4a 100644
--- a/examples/go.sum
+++ b/examples/go.sum
@@ -10,8 +10,8 @@ git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGy
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/Jorropo/jsync v1.0.1 h1:6HgRolFZnsdfzRUj+ImB9og1JYOxQoReSywkHOGSaUU=
 github.com/Jorropo/jsync v1.0.1/go.mod h1:jCOZj3vrBCri3bSU3ErUYvevKlnbssrXeCivybS5ABQ=
-github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs=
-github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
+github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0=
+github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs=
 github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
 github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
 github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
@@ -39,8 +39,8 @@ github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8
 github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668 h1:ZFUue+PNxmHlu7pYv+IYMtqlaO/0VwaGEqKepZf9JpA=
-github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE=
+github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf h1:dwGgBWn84wUS1pVikGiruW+x5XM4amhjaZO20vCjay4=
+github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE=
 github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0=
 github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -75,8 +75,12 @@ github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiD
 github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
 github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/gabriel-vasile/mimetype v1.4.4 h1:QjV6pZ7/XZ7ryI2KuyeEDE8wnh7fHP9YnQy+R0LnH8I=
-github.com/gabriel-vasile/mimetype v1.4.4/go.mod h1:JwLei5XPtWdGiMFB5Pjle1oEeoSeEuJfJE+TtfvdB/s=
+github.com/gabriel-vasile/mimetype v1.4.6 h1:3+PzJTKLkvgjeTbts6msPJt4DixhT4YtFNf1gtGe3zc=
+github.com/gabriel-vasile/mimetype v1.4.6/go.mod h1:JX1qVKqZd40hUPpAfiNTe0Sne7hdfKSbOqqmkq8GCXc=
+github.com/gammazero/chanqueue v1.0.0 h1:FER/sMailGFA3DDvFooEkipAMU+3c9Bg3bheloPSz6o=
+github.com/gammazero/chanqueue v1.0.0/go.mod h1:fMwpwEiuUgpab0sH4VHiVcEoji1pSi+EIzeG4TPeKPc=
+github.com/gammazero/deque v1.0.0 h1:LTmimT8H7bXkkCy6gZX7zNLtkbz4NdS2z8LZuor3j34=
+github.com/gammazero/deque v1.0.0/go.mod h1:iflpYvtGfM3U8S8j+sZEKIak3SAKYpA5/SQewgfXDKo=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
 github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
@@ -97,9 +101,8 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
 github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
@@ -129,8 +132,8 @@ github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF
 github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k=
-github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
+github.com/google/pprof v0.0.0-20241017200806-017d972448fc h1:NGyrhhFhwvRAZg02jnYVg3GBQy0qGBKmFQJwaPmpmxs=
+github.com/google/pprof v0.0.0-20241017200806-017d972448fc/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -147,8 +150,8 @@ github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aN
 github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
 github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
 github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -180,8 +183,6 @@ github.com/ipfs/go-ipfs-blockstore v1.3.1 h1:cEI9ci7V0sRNivqaOr0elDsamxXFxJMMMy7
 github.com/ipfs/go-ipfs-blockstore v1.3.1/go.mod h1:KgtZyc9fq+P2xJUiCAzbRdhhqJHvsw8u2Dlqy2MyRTE=
 github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ=
 github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk=
-github.com/ipfs/go-ipfs-chunker v0.0.5 h1:ojCf7HV/m+uS2vhUGWcogIIxiO5ubl5O57Q7NapWLY8=
-github.com/ipfs/go-ipfs-chunker v0.0.5/go.mod h1:jhgdF8vxRHycr00k13FM8Y0E+6BoalYeobXmUyTreP8=
 github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ=
 github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw=
 github.com/ipfs/go-ipfs-ds-help v1.1.1 h1:B5UJOH52IbcfS56+Ul+sv8jnIV10lbjLF5eOO0C66Nw=
@@ -192,8 +193,8 @@ github.com/ipfs/go-ipfs-exchange-offline v0.3.0 h1:c/Dg8GDPzixGd0MC8Jh6mjOwU57uY
 github.com/ipfs/go-ipfs-exchange-offline v0.3.0/go.mod h1:MOdJ9DChbb5u37M1IcbrRB02e++Z7521fMxqCNRrz9s=
 github.com/ipfs/go-ipfs-pq v0.0.3 h1:YpoHVJB+jzK15mr/xsWC574tyDLkezVrDNeaalQBsTE=
 github.com/ipfs/go-ipfs-pq v0.0.3/go.mod h1:btNw5hsHBpRcSSgZtiNm/SLj5gYIZ18AKtv3kERkRb4=
-github.com/ipfs/go-ipfs-redirects-file v0.1.1 h1:Io++k0Vf/wK+tfnhEh63Yte1oQK5VGT2hIEYpD0Rzx8=
-github.com/ipfs/go-ipfs-redirects-file v0.1.1/go.mod h1:tAwRjCV0RjLTjH8DR/AU7VYvfQECg+lpUy2Mdzv7gyk=
+github.com/ipfs/go-ipfs-redirects-file v0.1.2 h1:QCK7VtL91FH17KROVVy5KrzDx2hu68QvB2FTWk08ZQk=
+github.com/ipfs/go-ipfs-redirects-file v0.1.2/go.mod h1:yIiTlLcDEM/8lS6T3FlCEXZktPPqSOyuY6dEzVqw7Fw=
 github.com/ipfs/go-ipfs-routing v0.3.0 h1:9W/W3N+g+y4ZDeffSgqhgo7BsBSJwPMcyssET9OWevc=
 github.com/ipfs/go-ipfs-routing v0.3.0/go.mod h1:dKqtTFIql7e1zYsEuWLyuOU+E0WJWW8JjbTPLParDWo=
 github.com/ipfs/go-ipfs-util v0.0.3 h1:2RFdGez6bu2ZlZdI+rWfIdbQb1KudQp3VGwPtdNCmE0=
@@ -217,16 +218,14 @@ github.com/ipfs/go-peertaskqueue v0.8.1 h1:YhxAs1+wxb5jk7RvS0LHdyiILpNmRIRnZVzte
 github.com/ipfs/go-peertaskqueue v0.8.1/go.mod h1:Oxxd3eaK279FxeydSPPVGHzbwVeHjatZ2GA8XD+KbPU=
 github.com/ipfs/go-test v0.0.4 h1:DKT66T6GBB6PsDFLoO56QZPrOmzJkqU1FZH5C9ySkew=
 github.com/ipfs/go-test v0.0.4/go.mod h1:qhIM1EluEfElKKM6fnWxGn822/z9knUGM1+I/OAQNKI=
-github.com/ipfs/go-unixfs v0.4.5 h1:wj8JhxvV1G6CD7swACwSKYa+NgtdWC1RUit+gFnymDU=
-github.com/ipfs/go-unixfs v0.4.5/go.mod h1:BIznJNvt/gEx/ooRMI4Us9K8+qeGO7vx1ohnbk8gjFg=
-github.com/ipfs/go-unixfsnode v1.9.0 h1:ubEhQhr22sPAKO2DNsyVBW7YB/zA8Zkif25aBvz8rc8=
-github.com/ipfs/go-unixfsnode v1.9.0/go.mod h1:HxRu9HYHOjK6HUqFBAi++7DVoWAHn0o4v/nZ/VA+0g8=
+github.com/ipfs/go-unixfsnode v1.9.2 h1:0A12BYs4XOtDPJTMlwmNPlllDfqcc4yie4e919hcUXk=
+github.com/ipfs/go-unixfsnode v1.9.2/go.mod h1:v1nuMFHf4QTIhFUdPMvg1nQu7AqDLvIdwyvJ531Ot1U=
 github.com/ipfs/go-verifcid v0.0.3 h1:gmRKccqhWDocCRkC+a59g5QW7uJw5bpX9HWBevXa0zs=
 github.com/ipfs/go-verifcid v0.0.3/go.mod h1:gcCtGniVzelKrbk9ooUSX/pM3xlH73fZZJDzQJRvOUw=
 github.com/ipld/go-car v0.6.2 h1:Hlnl3Awgnq8icK+ze3iRghk805lu8YNq3wlREDTF2qc=
 github.com/ipld/go-car v0.6.2/go.mod h1:oEGXdwp6bmxJCZ+rARSkDliTeYnVzv3++eXajZ+Bmr8=
-github.com/ipld/go-car/v2 v2.13.1 h1:KnlrKvEPEzr5IZHKTXLAEub+tPrzeAFQVRlSQvuxBO4=
-github.com/ipld/go-car/v2 v2.13.1/go.mod h1:QkdjjFNGit2GIkpQ953KBwowuoukoM75nP/JI1iDJdo=
+github.com/ipld/go-car/v2 v2.14.2 h1:9ERr7KXpCC7If0rChZLhYDlyr6Bes6yRKPJnCO3hdHY=
+github.com/ipld/go-car/v2 v2.14.2/go.mod h1:0iPB/825lTZLU2zPK5bVTk/R3V2612E1VI279OGSXWA=
 github.com/ipld/go-codec-dagpb v1.6.0 h1:9nYazfyu9B1p3NAgfVdpRco3Fs2nFC72DqVsMj6rOcc=
 github.com/ipld/go-codec-dagpb v1.6.0/go.mod h1:ANzFhfP2uMJxRBr8CE+WQWs5UsNa0pYtmKZ+agnUw9s=
 github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH9C2E=
@@ -248,8 +247,8 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV
 github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
-github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
 github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM=
 github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
 github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0=
@@ -263,26 +262,28 @@ github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
 github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
 github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c=
 github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic=
 github.com/libp2p/go-doh-resolver v0.4.0 h1:gUBa1f1XsPwtpE1du0O+nnZCUqtG7oYi7Bb+0S7FQqw=
 github.com/libp2p/go-doh-resolver v0.4.0/go.mod h1:v1/jwsFusgsWIGX/c6vCRrnJ60x7bhTiq/fs2qt0cAg=
-github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM=
-github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro=
-github.com/libp2p/go-libp2p v0.36.3 h1:NHz30+G7D8Y8YmznrVZZla0ofVANrvBl2c+oARfMeDQ=
-github.com/libp2p/go-libp2p v0.36.3/go.mod h1:4Y5vFyCUiJuluEPmpnKYf6WFx5ViKPUYs/ixe9ANFZ8=
+github.com/libp2p/go-flow-metrics v0.2.0 h1:EIZzjmeOE6c8Dav0sNv35vhZxATIXWZg6j/C08XmmDw=
+github.com/libp2p/go-flow-metrics v0.2.0/go.mod h1:st3qqfu8+pMfh+9Mzqb2GTiwrAGjIPszEjZmtksN8Jc=
+github.com/libp2p/go-libp2p v0.37.2 h1:Irh+n9aDPTLt9wJYwtlHu6AhMUipbC1cGoJtOiBqI9c=
+github.com/libp2p/go-libp2p v0.37.2/go.mod h1:M8CRRywYkqC6xKHdZ45hmqVckBj5z4mRLIMLWReypz8=
 github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
 github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
-github.com/libp2p/go-libp2p-kad-dht v0.25.2 h1:FOIk9gHoe4YRWXTu8SY9Z1d0RILol0TrtApsMDPjAVQ=
-github.com/libp2p/go-libp2p-kad-dht v0.25.2/go.mod h1:6za56ncRHYXX4Nc2vn8z7CZK0P4QiMcrn77acKLM2Oo=
-github.com/libp2p/go-libp2p-kbucket v0.6.3 h1:p507271wWzpy2f1XxPzCQG9NiN6R6lHL9GiSErbQQo0=
-github.com/libp2p/go-libp2p-kbucket v0.6.3/go.mod h1:RCseT7AH6eJWxxk2ol03xtP9pEHetYSPXOaJnOiD8i0=
+github.com/libp2p/go-libp2p-kad-dht v0.27.0 h1:1Ea32tVTPiAfaLpPMbaBWFJgbsi/JpMqC2YBuFdf32o=
+github.com/libp2p/go-libp2p-kad-dht v0.27.0/go.mod h1:ixhjLuzaXSGtWsKsXTj7erySNuVC4UP7NO015cRrF14=
+github.com/libp2p/go-libp2p-kbucket v0.6.4 h1:OjfiYxU42TKQSB8t8WYd8MKhYhMJeO2If+NiuKfb6iQ=
+github.com/libp2p/go-libp2p-kbucket v0.6.4/go.mod h1:jp6w82sczYaBsAypt5ayACcRJi0lgsba7o4TzJKEfWA=
 github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0=
 github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk=
-github.com/libp2p/go-libp2p-routing-helpers v0.7.3 h1:u1LGzAMVRK9Nqq5aYDVOiq/HaB93U9WWczBzGyAC5ZY=
-github.com/libp2p/go-libp2p-routing-helpers v0.7.3/go.mod h1:cN4mJAD/7zfPKXBcs9ze31JGYAZgzdABEm+q/hkswb8=
+github.com/libp2p/go-libp2p-routing-helpers v0.7.4 h1:6LqS1Bzn5CfDJ4tzvP9uwh42IB7TJLNFJA6dEeGBv84=
+github.com/libp2p/go-libp2p-routing-helpers v0.7.4/go.mod h1:we5WDj9tbolBXOuF1hGOkR+r7Uh1408tQbAKaT5n1LE=
 github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
 github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg=
 github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
@@ -305,8 +306,8 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
 github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
-github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs=
-github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ=
+github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ=
+github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ=
 github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
 github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
 github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
@@ -331,8 +332,8 @@ github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y9
 github.com/multiformats/go-multiaddr v0.13.0 h1:BCBzs61E3AGHcYYTv8dqRH43ZfyrqM8RXVPT8t13tLQ=
 github.com/multiformats/go-multiaddr v0.13.0/go.mod h1:sBXrNzucqkFJhvKOiwwLyqamGa/P5EIXNPLovyhQCII=
 github.com/multiformats/go-multiaddr-dns v0.3.0/go.mod h1:mNzQ4eTGDg0ll1N9jKPOUogZPoJ30W8a7zk66FQPpdQ=
-github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A=
-github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk=
+github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M=
+github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc=
 github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
 github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
 github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
@@ -342,8 +343,8 @@ github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI1
 github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
 github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
 github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
-github.com/multiformats/go-multistream v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf/QGkvOGQAyiE=
-github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA=
+github.com/multiformats/go-multistream v0.6.0 h1:ZaHKbsL404720283o4c/IHQXiS6gb8qAN5EIJ4PN5EA=
+github.com/multiformats/go-multistream v0.6.0/go.mod h1:MOyoG5otO24cHIg8kf9QW2/NozURlkP/rvi2FQJyCPg=
 github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
 github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
 github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
@@ -351,10 +352,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
 github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
-github.com/onsi/ginkgo/v2 v2.19.1 h1:QXgq3Z8Crl5EL1WBAC98A5sEBHARrAJNzAmMxzLcRF0=
-github.com/onsi/ginkgo/v2 v2.19.1/go.mod h1:O3DtEWQkPa/F7fBMgmZQKKsluAy8pd3rEQdrjkPb9zA=
-github.com/onsi/gomega v1.34.0 h1:eSSPsPNp6ZpsG8X1OVmOTxig+CblTc4AxpPBykhe2Os=
-github.com/onsi/gomega v1.34.0/go.mod h1:MIKI8c+f+QLWk+hxbePD4i0LMJSExPaZOVfkoex4cAo=
+github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4=
+github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag=
+github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k=
+github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY=
 github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
 github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@@ -367,15 +368,15 @@ github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2D
 github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
 github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 h1:1/WtZae0yGtPq+TI6+Tv1WTxkukpXeMlviSxvL7SRgk=
 github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9/go.mod h1:x3N5drFsm2uilKKuuYo6LdyD8vZAW55sH/9w+pbo1sw=
-github.com/pion/datachannel v1.5.8 h1:ph1P1NsGkazkjrvyMfhRBUAWMxugJjq2HfQifaOoSNo=
-github.com/pion/datachannel v1.5.8/go.mod h1:PgmdpoaNBLX9HNzNClmdki4DYW5JtI7Yibu8QzbL3tI=
+github.com/pion/datachannel v1.5.9 h1:LpIWAOYPyDrXtU+BW7X0Yt/vGtYxtXQ8ql7dFfYUVZA=
+github.com/pion/datachannel v1.5.9/go.mod h1:kDUuk4CU4Uxp82NH4LQZbISULkX/HtzKa4P7ldf9izE=
 github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s=
 github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk=
 github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE=
-github.com/pion/ice/v2 v2.3.34 h1:Ic1ppYCj4tUOcPAp76U6F3fVrlSw8A9JtRXLqw6BbUM=
-github.com/pion/ice/v2 v2.3.34/go.mod h1:mBF7lnigdqgtB+YHkaY/Y6s6tsyRyo4u4rPGRuOjUBQ=
-github.com/pion/interceptor v0.1.29 h1:39fsnlP1U8gw2JzOFWdfCU82vHvhW9o0rZnZF56wF+M=
-github.com/pion/interceptor v0.1.29/go.mod h1:ri+LGNjRUc5xUNtDEPzfdkmSqISixVTBF/z/Zms/6T4=
+github.com/pion/ice/v2 v2.3.36 h1:SopeXiVbbcooUg2EIR8sq4b13RQ8gzrkkldOVg+bBsc=
+github.com/pion/ice/v2 v2.3.36/go.mod h1:mBF7lnigdqgtB+YHkaY/Y6s6tsyRyo4u4rPGRuOjUBQ=
+github.com/pion/interceptor v0.1.37 h1:aRA8Zpab/wE7/c0O3fh1PqY0AJI3fCSEM5lRWJVorwI=
+github.com/pion/interceptor v0.1.37/go.mod h1:JzxbJ4umVTlZAf+/utHzNesY8tmRkM2lVmkS82TTj8Y=
 github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY=
 github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
 github.com/pion/mdns v0.0.12 h1:CiMYlY+O0azojWDmxdNr7ADGrnZ+V6Ilfner+6mSVK8=
@@ -386,10 +387,10 @@ github.com/pion/rtcp v1.2.12/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9
 github.com/pion/rtcp v1.2.14 h1:KCkGV3vJ+4DAJmvP0vaQShsb0xkRfWkO540Gy102KyE=
 github.com/pion/rtcp v1.2.14/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4=
 github.com/pion/rtp v1.8.3/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU=
-github.com/pion/rtp v1.8.8 h1:EtYFHI0rpUEjT/RMnGfb1vdJhbYmPG77szD72uUnSxs=
-github.com/pion/rtp v1.8.8/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU=
-github.com/pion/sctp v1.8.20 h1:sOc3lkV/tQaP57ZUEXIMdM2V92IIB2ia5v/ygnBxaEg=
-github.com/pion/sctp v1.8.20/go.mod h1:oTxw8i5m+WbDHZJL/xUpe6CPIn1Y0GIKKwTLF4h53H8=
+github.com/pion/rtp v1.8.9 h1:E2HX740TZKaqdcPmf4pw6ZZuG8u5RlMMt+l3dxeu6Wk=
+github.com/pion/rtp v1.8.9/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU=
+github.com/pion/sctp v1.8.33 h1:dSE4wX6uTJBcNm8+YlMg7lw1wqyKHggsP5uKbdj+NZw=
+github.com/pion/sctp v1.8.33/go.mod h1:beTnqSzewI53KWoG3nqB282oDMGrhNxBdb+JZnkCwRM=
 github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY=
 github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M=
 github.com/pion/srtp/v2 v2.0.20 h1:HNNny4s+OUmG280ETrCdgFndp4ufx3/uy85EawYEhTk=
@@ -402,13 +403,13 @@ github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLh
 github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q=
 github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E=
 github.com/pion/transport/v3 v3.0.1/go.mod h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9SzK5f5xE0=
-github.com/pion/transport/v3 v3.0.6 h1:k1mQU06bmmX143qSWgXFqSH1KUJceQvIUuVH/K5ELWw=
-github.com/pion/transport/v3 v3.0.6/go.mod h1:HvJr2N/JwNJAfipsRleqwFoR3t/pWyHeZUs89v3+t5s=
+github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0=
+github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo=
 github.com/pion/turn/v2 v2.1.3/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY=
 github.com/pion/turn/v2 v2.1.6 h1:Xr2niVsiPTB0FPtt+yAWKFUkU1eotQbGgpTIld4x1Gc=
 github.com/pion/turn/v2 v2.1.6/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY=
-github.com/pion/webrtc/v3 v3.3.0 h1:Rf4u6n6U5t5sUxhYPQk/samzU/oDv7jk6BA5hyO2F9I=
-github.com/pion/webrtc/v3 v3.3.0/go.mod h1:hVmrDJvwhEertRWObeb1xzulzHGeVUoPlWvxdGzcfU0=
+github.com/pion/webrtc/v3 v3.3.4 h1:v2heQVnXTSqNRXcaFQVOhIOYkLMxOu1iJG8uy1djvkk=
+github.com/pion/webrtc/v3 v3.3.4/go.mod h1:liNa+E1iwyzyXqNUwvoMRNQ10x8h8FOeJKL8RkIbamE=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -417,33 +418,33 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
 github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4=
 github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw=
 github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
-github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
+github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
 github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
 github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
-github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
+github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA=
+github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
 github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
 github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
-github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo=
-github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A=
-github.com/quic-go/quic-go v0.45.2 h1:DfqBmqjb4ExSdxRIb/+qXhPC+7k6+DUNZha4oeiC9fY=
-github.com/quic-go/quic-go v0.45.2/go.mod h1:1dLehS7TIR64+vxGR70GDcatWTOtMX2PUtnKsjbTurI=
-github.com/quic-go/webtransport-go v0.8.0 h1:HxSrwun11U+LlmwpgM1kEqIqH90IT4N8auv/cD7QFJg=
-github.com/quic-go/webtransport-go v0.8.0/go.mod h1:N99tjprW432Ut5ONql/aUhSLT0YVSlwHohQsuac9WaM=
+github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
+github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
+github.com/quic-go/quic-go v0.48.2 h1:wsKXZPeGWpMpCGSWqOcqpW2wZYic/8T3aqiOID0/KWE=
+github.com/quic-go/quic-go v0.48.2/go.mod h1:yBgs3rWBOADpga7F+jJsb6Ybg1LSYiQvwWlLX+/6HMs=
+github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 h1:4WFk6u3sOT6pLa1kQ50ZVdm8BQFgJNA117cepZxtLIg=
+github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66/go.mod h1:Vp72IJajgeOL6ddqrAhmp7IM9zbTcgkQxD/YdxrVwMw=
 github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk=
 github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
 github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/samber/lo v1.39.0 h1:4gTz1wUhNYLhFSKl6O+8peW0v2F4BCY034GRpU9WnuA=
-github.com/samber/lo v1.39.0/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA=
+github.com/samber/lo v1.47.0 h1:z7RynLwP5nbyRscyvcD043DWYoOcYRv3mV8lBeqOCLc=
+github.com/samber/lo v1.47.0/go.mod h1:RmDH9Ct32Qy3gduHQuKJ3gW1fMHAnE/fAzQuf6He5cU=
 github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
 github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
 github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
@@ -469,6 +470,8 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV
 github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
 github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
 github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/slok/go-http-metrics v0.12.0 h1:mAb7hrX4gB4ItU6NkFoKYdBslafg3o60/HbGBRsKaG8=
+github.com/slok/go-http-metrics v0.12.0/go.mod h1:Ee/mdT9BYvGrlGzlClkK05pP2hRHmVbRF9dtUVS8LNA=
 github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs=
 github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
 github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg=
@@ -494,8 +497,6 @@ github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl
 github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
 github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
-github.com/tj/assert v0.0.3 h1:Df/BlaZ20mq6kuai7f5z2TvPFiwC3xaWJSDQNiIS3Rk=
-github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk=
 github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb h1:Ywfo8sUltxogBpFuMOFRrrSifO788kAFxmvVw31PtQQ=
 github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb/go.mod h1:ikPs9bRWicNw3S7XpJ8sK/smGwU9WcSVU3dy9qahYBM=
 github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
@@ -516,8 +517,9 @@ github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP
 github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8=
 github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k=
 github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc=
-github.com/wlynxg/anet v0.0.3 h1:PvR53psxFXstc12jelG6f1Lv4MWqE0tI76/hHGjh9rg=
 github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
+github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU=
+github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
@@ -525,8 +527,8 @@ github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5t
 go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
 go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
 go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM=
 go.opentelemetry.io/contrib/propagators/autoprop v0.46.1 h1:cXTYcMjY0dsYokAuo8LbNBQxpF8VgTHdiHJJ1zlIXl4=
 go.opentelemetry.io/contrib/propagators/autoprop v0.46.1/go.mod h1:WZxgny1/6+j67B1s72PLJ4bGjidoWFzSmLNfJKVt2bo=
 go.opentelemetry.io/contrib/propagators/aws v1.21.1 h1:uQIQIDWb0gzyvon2ICnghpLAf9w7ADOCUiIiwCQgR2o=
@@ -537,39 +539,39 @@ go.opentelemetry.io/contrib/propagators/jaeger v1.21.1 h1:f4beMGDKiVzg9IcX7/VuWV
 go.opentelemetry.io/contrib/propagators/jaeger v1.21.1/go.mod h1:U9jhkEl8d1LL+QXY7q3kneJWJugiN3kZJV2OWz3hkBY=
 go.opentelemetry.io/contrib/propagators/ot v1.21.1 h1:3TN5vkXjKYWp0YdMcnUEC/A+pBPvqz9V3nCS2xmcurk=
 go.opentelemetry.io/contrib/propagators/ot v1.21.1/go.mod h1:oy0MYCbS/b3cqUDW37wBWtlwBIsutngS++Lklpgh+fc=
-go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg=
-go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY=
-go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 h1:/0YaXu3755A/cFbtXp+21lkXgI0QE5avTWA2HjU9/WE=
-go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0/go.mod h1:m7SFxp0/7IxmJPLIY3JhOcU9CoFzDaCPL6xxQIxhA+o=
-go.opentelemetry.io/otel/exporters/zipkin v1.27.0 h1:aXcxb7F6ZDC1o2Z52LDfS2g6M2FB5CrxdR2gzY4QRNs=
-go.opentelemetry.io/otel/exporters/zipkin v1.27.0/go.mod h1:+WMURoi4KmVB7ypbFPx3xtZTWen2Ca3lRK9u6DVTO5M=
-go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik=
-go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak=
-go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI=
-go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A=
-go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw=
-go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4=
+go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
+go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 h1:lUsI2TYsQw2r1IASwoROaCnjdj2cvC2+Jbxvk6nHnWU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0/go.mod h1:2HpZxxQurfGxJlJDblybejHB6RX6pmExPNe517hREw4=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 h1:UGZ1QwZWY67Z6BmckTU+9Rxn04m2bD3gD6Mk0OIOCPk=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0/go.mod h1:fcwWuDuaObkkChiDlhEpSq9+X1C0omv+s5mBtToAQ64=
+go.opentelemetry.io/otel/exporters/zipkin v1.31.0 h1:CgucL0tj3717DJnni7HVVB2wExzi8c2zJNEA2BhLMvI=
+go.opentelemetry.io/otel/exporters/zipkin v1.31.0/go.mod h1:rfzOVNiSwIcWtEC2J8epwG26fiaXlYvLySJ7bwsrtAE=
+go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
+go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
+go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
+go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
+go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
+go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
 go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
 go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
 go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
 go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
 go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
-go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc=
-go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
-go.uber.org/fx v1.22.1 h1:nvvln7mwyT5s1q201YE29V/BFrGor6vMiDNpU/78Mys=
-go.uber.org/fx v1.22.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48=
+go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw=
+go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
+go.uber.org/fx v1.23.0 h1:lIr/gYWQGfTwGcSXWXu4vP5Ws6iqnNEIY+F/aFzCKTg=
+go.uber.org/fx v1.23.0/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU=
 go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
 go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
-go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
-go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
+go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU=
+go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
 go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
 go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
 go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
@@ -594,11 +596,11 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
 golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
 golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
 golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
-golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
-golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
+golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
+golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
-golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
+golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY=
+golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8=
 golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
@@ -612,8 +614,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8=
-golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
+golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -637,8 +639,8 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
 golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
 golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
 golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
-golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=
-golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
+golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
+golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -654,8 +656,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
-golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
+golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -682,8 +684,8 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
-golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
+golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
@@ -699,8 +701,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
 golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
 golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
-golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
+golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
+golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
 golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
@@ -724,14 +726,14 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
 golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
 golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg=
-golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI=
+golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
+golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU=
-golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
+golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY=
+golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
 gonum.org/v1/gonum v0.15.0 h1:2lYxjRbTYyxkJxlhC+LvJIx3SsANPdRybu1tGj9/OrQ=
 gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo=
 google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
@@ -748,10 +750,10 @@ google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk
 google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 h1:MuYw1wJzT+ZkybKfaOXKp5hJiZDn2iHaXRw0mRYdHSc=
-google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4/go.mod h1:px9SlOOZBg1wM1zdnr8jEL4CNGUBZ+ZKYtNPApNQc4c=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4 h1:Di6ANFilr+S60a4S61ZM00vLdw0IrQOSMS2/6mrnOU0=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
+google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 h1:T6rh4haD3GVYsgEfWExoCZA2o2FmbNyKpTuAxbEFPTg=
+google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:wp2WsuBYj6j8wUdo3ToZsdxxixbvQNAHqVJrTgi5E5M=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 h1:QCqS/PdaHTSWGvupk2F/ehwHtGc0/GYkT+3GAcR1CCc=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
 google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
 google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
 google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
@@ -760,8 +762,8 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac
 google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
 google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
 google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY=
-google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
+google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E=
+google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -771,8 +773,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/examples/routing/delegated-routing-client/main.go b/examples/routing/delegated-routing-client/main.go index 8fac342ac..d60d08675 100644 --- a/examples/routing/delegated-routing-client/main.go +++ b/examples/routing/delegated-routing-client/main.go @@ -26,31 +26,32 @@ func main() { namePtr := flag.String("ipns", "", "ipns name to retrieve record for") flag.Parse() - if err := run(os.Stdout, *gatewayUrlPtr, *cidPtr, *pidPtr, *namePtr, *timeoutPtr); err != nil { + timeout := time.Duration(*timeoutPtr) * time.Second + if err := run(os.Stdout, *gatewayUrlPtr, *cidPtr, *pidPtr, *namePtr, timeout); err != nil { log.Fatal(err) } } -func run(w io.Writer, gatewayURL, cidStr, pidStr, nameStr string, timeoutSeconds int) error { +func run(w io.Writer, gatewayURL, cidStr, pidStr, nameStr string, timeout time.Duration) error { // Creates a new Delegated Routing V1 client. 
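+	// The client filters provider records using DefaultProtocolFilter; see
+	// TestFindProviders below, where a record advertising only "horse" is skipped.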
 	client, err := client.New(gatewayURL)
 	if err != nil {
 		return err
 	}
 
-	timeout := time.Duration(timeoutSeconds) * time.Second
 	ctx, cancel := context.WithTimeout(context.Background(), timeout)
 	defer cancel()
 
 	if cidStr != "" {
 		return findProviders(w, ctx, client, cidStr)
-	} else if pidStr != "" {
+	}
+	if pidStr != "" {
 		return findPeers(w, ctx, client, pidStr)
-	} else if nameStr != "" {
+	}
+	if nameStr != "" {
 		return findIPNS(w, ctx, client, nameStr)
-	} else {
-		return errors.New("cid or peer must be provided")
 	}
+	return errors.New("cid, peer, or ipns name must be provided")
 }
 
 func findProviders(w io.Writer, ctx context.Context, client *client.Client, cidStr string) error {
diff --git a/examples/routing/delegated-routing-client/main_test.go b/examples/routing/delegated-routing-client/main_test.go
index 2dab7b13a..625735efc 100644
--- a/examples/routing/delegated-routing-client/main_test.go
+++ b/examples/routing/delegated-routing-client/main_test.go
@@ -25,15 +25,16 @@ func TestFindProviders(t *testing.T) {
 		if r.URL.Path == "/routing/v1/providers/"+cidStr {
 			w.Header().Set("Content-Type", "application/x-ndjson")
 			w.Write([]byte(`{"Schema":"peer","ID":"12D3KooWM8sovaEGU1bmiWGWAzvs47DEcXKZZTuJnpQyVTkRs2Vn","Addrs":["/ip4/111.222.222.111/tcp/5734"],"Protocols":["transport-bitswap"]}` + "\n"))
-			w.Write([]byte(`{"Schema":"peer","ID":"12D3KooWB6RAWgcmHAP7TGEGK7utV2ZuqSzX1DNjRa97TtJ7139n","Addrs":["/ip4/127.0.0.1/tcp/5734"],"Protocols":["transport-horse"]}` + "\n"))
+			w.Write([]byte(`{"Schema":"peer","ID":"12D3KooWS6BmwfQEZcRqCHCBbDL2DF5a6F7dZnbPFkwmZCuLEK5f","Addrs":["/ip4/127.0.0.1/tcp/6434"],"Protocols":["horse"]}` + "\n")) // this one will be skipped by DefaultProtocolFilter
+			w.Write([]byte(`{"Schema":"peer","ID":"12D3KooWB6RAWgcmHAP7TGEGK7utV2ZuqSzX1DNjRa97TtJ7139n","Addrs":["/ip4/127.0.0.1/tcp/5734"],"Protocols":[]}` + "\n"))
 		}
 	}))
 	t.Cleanup(ts.Close)
 
 	out := &bytes.Buffer{}
-	err := run(out, ts.URL, cidStr, "", "", 1)
+	err := run(out, ts.URL, cidStr, "", "", time.Second)
 	assert.Contains(t, out.String(), "12D3KooWM8sovaEGU1bmiWGWAzvs47DEcXKZZTuJnpQyVTkRs2Vn\n\tProtocols: [transport-bitswap]\n\tAddresses: [/ip4/111.222.222.111/tcp/5734]\n")
-	assert.Contains(t, out.String(), "12D3KooWB6RAWgcmHAP7TGEGK7utV2ZuqSzX1DNjRa97TtJ7139n\n\tProtocols: [transport-horse]\n\tAddresses: [/ip4/127.0.0.1/tcp/5734]\n")
+	assert.Contains(t, out.String(), "12D3KooWB6RAWgcmHAP7TGEGK7utV2ZuqSzX1DNjRa97TtJ7139n\n\tProtocols: []\n\tAddresses: [/ip4/127.0.0.1/tcp/5734]\n")
 	assert.NoError(t, err)
 }
 
@@ -49,7 +50,7 @@ func TestFindPeers(t *testing.T) {
 	t.Cleanup(ts.Close)
 
 	out := &bytes.Buffer{}
-	err := run(out, ts.URL, "", pidStr, "", 1)
+	err := run(out, ts.URL, "", pidStr, "", time.Second)
 	assert.Contains(t, out.String(), "12D3KooWM8sovaEGU1bmiWGWAzvs47DEcXKZZTuJnpQyVTkRs2Vn\n\tProtocols: [transport-bitswap]\n\tAddresses: [/ip4/111.222.222.111/tcp/5734]\n")
 	assert.NoError(t, err)
 }
 
@@ -66,7 +67,7 @@ func TestGetIPNS(t *testing.T) {
 	t.Cleanup(ts.Close)
 
 	out := &bytes.Buffer{}
-	err := run(out, ts.URL, "", "", name.String(), 1)
+	err := run(out, ts.URL, "", "", name.String(), time.Second)
 	assert.Contains(t, out.String(), fmt.Sprintf("/ipns/%s\n\tSignature: VALID\n\tValue: /ipfs/bafkreifjjcie6lypi6ny7amxnfftagclbuxndqonfipmb64f2km2devei4\n", name.String()))
 	assert.NoError(t, err)
 }
diff --git a/exchange/providing/providing.go b/exchange/providing/providing.go
new file mode 100644
index 000000000..6b2887858
--- /dev/null
+++ b/exchange/providing/providing.go
@@ -0,0 +1,46 @@
+// Package providing implements an exchange wrapper which
+// does content providing for new blocks.
+package providing
+
+import (
+	"context"
+
+	"github.com/ipfs/boxo/exchange"
+	"github.com/ipfs/boxo/provider"
+	blocks "github.com/ipfs/go-block-format"
+)
+
+// Exchange is an exchange wrapper that calls Provide for blocks received
+// over NotifyNewBlocks.
+type Exchange struct {
+	exchange.Interface
+	provider provider.Provider
+}
+
+// New creates a new providing Exchange with the given exchange and provider.
+// This is a light wrapper. We recommend using a provider that can handle
+// many concurrent provides, since Provide is called directly for every new
+// block.
+func New(base exchange.Interface, provider provider.Provider) *Exchange {
+	return &Exchange{
+		Interface: base,
+		provider:  provider,
+	}
+}
+
+// NotifyNewBlocks calls NotifyNewBlocks on the underlying exchange and then
+// calls provider.Provide for every block.
+func (ex *Exchange) NotifyNewBlocks(ctx context.Context, blocks ...blocks.Block) error {
+	// Notify blocks on the underlying exchange.
+	err := ex.Interface.NotifyNewBlocks(ctx, blocks...)
+	if err != nil {
+		return err
+	}
+
+	for _, b := range blocks {
+		if err := ex.provider.Provide(ctx, b.Cid(), true); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/exchange/providing/providing_test.go b/exchange/providing/providing_test.go
new file mode 100644
index 000000000..4ee1dcf6b
--- /dev/null
+++ b/exchange/providing/providing_test.go
@@ -0,0 +1,74 @@
+package providing
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	testinstance "github.com/ipfs/boxo/bitswap/testinstance"
+	tn "github.com/ipfs/boxo/bitswap/testnet"
+	"github.com/ipfs/boxo/blockservice"
+	"github.com/ipfs/boxo/provider"
+	mockrouting "github.com/ipfs/boxo/routing/mock"
+	delay "github.com/ipfs/go-ipfs-delay"
+	"github.com/ipfs/go-test/random"
+)
+
+func TestExchange(t *testing.T) {
+	ctx := context.Background()
+	net := tn.VirtualNetwork(delay.Fixed(0))
+	routing := mockrouting.NewServer()
+	sg := testinstance.NewTestInstanceGenerator(net, routing, nil, nil)
+	i := sg.Next()
+	provFinder := routing.Client(i.Identity)
+	prov, err := provider.New(i.Datastore,
+		provider.Online(provFinder),
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+	provExchange := New(i.Exchange, prov)
+	// write-through so that we notify when re-adding the block
+	bs := blockservice.New(i.Blockstore, provExchange,
+		blockservice.WriteThrough(true))
+	block := random.BlocksOfSize(1, 10)[0]
+	// put it on the blockstore of the first instance
+	err = i.Blockstore.Put(ctx, block)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Trigger reproviding, otherwise it's not really provided.
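+	// (The Put above went straight to the blockstore, bypassing the exchange,
+	// so the provider has not been told about this block.)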
+ err = prov.Reprovide(ctx) + if err != nil { + t.Fatal(err) + } + + time.Sleep(200 * time.Millisecond) + + providersChan = provFinder.FindProvidersAsync(ctx, block.Cid(), 1) + _, ok = <-providersChan + if !ok { + t.Fatal("there should be one provider for the block") + } +} diff --git a/fetcher/helpers/block_visitor_test.go b/fetcher/helpers/block_visitor_test.go index 57d3e11ad..9ea0eacd9 100644 --- a/fetcher/helpers/block_visitor_test.go +++ b/fetcher/helpers/block_visitor_test.go @@ -44,8 +44,9 @@ func TestFetchGraphToBlocks(t *testing.T) { }) })) - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0*time.Millisecond)) - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + routing := mockrouting.NewServer() + net := tn.VirtualNetwork(delay.Fixed(0 * time.Millisecond)) + ig := testinstance.NewTestInstanceGenerator(net, routing, nil, nil) defer ig.Close() peers := ig.Instances(2) @@ -53,7 +54,7 @@ func TestFetchGraphToBlocks(t *testing.T) { defer hasBlock.Exchange.Close() blocks := []blocks.Block{block1, block2, block3, block4} - err := hasBlock.Blockstore().PutMany(bg, blocks) + err := hasBlock.Blockstore.PutMany(bg, blocks) require.NoError(t, err) err = hasBlock.Exchange.NotifyNewBlocks(bg, blocks...) require.NoError(t, err) @@ -61,7 +62,7 @@ func TestFetchGraphToBlocks(t *testing.T) { wantsBlock := peers[1] defer wantsBlock.Exchange.Close() - wantsGetter := blockservice.New(wantsBlock.Blockstore(), wantsBlock.Exchange) + wantsGetter := blockservice.New(wantsBlock.Blockstore, wantsBlock.Exchange) fetcherConfig := bsfetcher.NewFetcherConfig(wantsGetter) session := fetcherConfig.NewSession(context.Background()) ctx, cancel := context.WithTimeout(context.Background(), time.Second) @@ -94,15 +95,16 @@ func TestFetchGraphToUniqueBlocks(t *testing.T) { }) })) - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0*time.Millisecond)) - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + routing := mockrouting.NewServer() + net := tn.VirtualNetwork(delay.Fixed(0 * time.Millisecond)) + ig := testinstance.NewTestInstanceGenerator(net, routing, nil, nil) defer ig.Close() peers := ig.Instances(2) hasBlock := peers[0] defer hasBlock.Exchange.Close() - err := hasBlock.Blockstore().PutMany(bg, []blocks.Block{block1, block2, block3}) + err := hasBlock.Blockstore.PutMany(bg, []blocks.Block{block1, block2, block3}) require.NoError(t, err) err = hasBlock.Exchange.NotifyNewBlocks(bg, block1, block2, block3) @@ -111,7 +113,7 @@ func TestFetchGraphToUniqueBlocks(t *testing.T) { wantsBlock := peers[1] defer wantsBlock.Exchange.Close() - wantsGetter := blockservice.New(wantsBlock.Blockstore(), wantsBlock.Exchange) + wantsGetter := blockservice.New(wantsBlock.Blockstore, wantsBlock.Exchange) fetcherConfig := bsfetcher.NewFetcherConfig(wantsGetter) session := fetcherConfig.NewSession(context.Background()) ctx, cancel := context.WithTimeout(context.Background(), time.Second) diff --git a/fetcher/impl/blockservice/fetcher_test.go b/fetcher/impl/blockservice/fetcher_test.go index 5a0b071f4..55c1d5c21 100644 --- a/fetcher/impl/blockservice/fetcher_test.go +++ b/fetcher/impl/blockservice/fetcher_test.go @@ -38,15 +38,16 @@ func TestFetchIPLDPrimeNode(t *testing.T) { }) })) - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0*time.Millisecond)) - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + routing := mockrouting.NewServer() + net := tn.VirtualNetwork(delay.Fixed(0 * time.Millisecond)) + ig := testinstance.NewTestInstanceGenerator(net, routing, nil, 
nil) defer ig.Close() peers := ig.Instances(2) hasBlock := peers[0] defer hasBlock.Exchange.Close() - err := hasBlock.Blockstore().Put(bg, block) + err := hasBlock.Blockstore.Put(bg, block) require.NoError(t, err) err = hasBlock.Exchange.NotifyNewBlocks(bg, block) @@ -55,7 +56,7 @@ func TestFetchIPLDPrimeNode(t *testing.T) { wantsBlock := peers[1] defer wantsBlock.Exchange.Close() - wantsGetter := blockservice.New(wantsBlock.Blockstore(), wantsBlock.Exchange) + wantsGetter := blockservice.New(wantsBlock.Blockstore, wantsBlock.Exchange) fetcherConfig := bsfetcher.NewFetcherConfig(wantsGetter) session := fetcherConfig.NewSession(context.Background()) @@ -87,8 +88,9 @@ func TestFetchIPLDGraph(t *testing.T) { }) })) - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0*time.Millisecond)) - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + routing := mockrouting.NewServer() + net := tn.VirtualNetwork(delay.Fixed(0 * time.Millisecond)) + ig := testinstance.NewTestInstanceGenerator(net, routing, nil, nil) defer ig.Close() peers := ig.Instances(2) @@ -96,7 +98,7 @@ func TestFetchIPLDGraph(t *testing.T) { defer hasBlock.Exchange.Close() blocks := []blocks.Block{block1, block2, block3, block4} - err := hasBlock.Blockstore().PutMany(bg, blocks) + err := hasBlock.Blockstore.PutMany(bg, blocks) require.NoError(t, err) err = hasBlock.Exchange.NotifyNewBlocks(bg, blocks...) require.NoError(t, err) @@ -104,7 +106,7 @@ func TestFetchIPLDGraph(t *testing.T) { wantsBlock := peers[1] defer wantsBlock.Exchange.Close() - wantsGetter := blockservice.New(wantsBlock.Blockstore(), wantsBlock.Exchange) + wantsGetter := blockservice.New(wantsBlock.Blockstore, wantsBlock.Exchange) fetcherConfig := bsfetcher.NewFetcherConfig(wantsGetter) session := fetcherConfig.NewSession(context.Background()) ctx, cancel := context.WithTimeout(context.Background(), time.Second) @@ -143,8 +145,9 @@ func TestFetchIPLDPath(t *testing.T) { }) })) - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0*time.Millisecond)) - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + routing := mockrouting.NewServer() + net := tn.VirtualNetwork(delay.Fixed(0 * time.Millisecond)) + ig := testinstance.NewTestInstanceGenerator(net, routing, nil, nil) defer ig.Close() peers := ig.Instances(2) @@ -152,7 +155,7 @@ func TestFetchIPLDPath(t *testing.T) { defer hasBlock.Exchange.Close() blocks := []blocks.Block{block1, block2, block3, block4, block5} - err := hasBlock.Blockstore().PutMany(bg, blocks) + err := hasBlock.Blockstore.PutMany(bg, blocks) require.NoError(t, err) err = hasBlock.Exchange.NotifyNewBlocks(bg, blocks...) 
require.NoError(t, err) @@ -160,7 +163,7 @@ func TestFetchIPLDPath(t *testing.T) { wantsBlock := peers[1] defer wantsBlock.Exchange.Close() - wantsGetter := blockservice.New(wantsBlock.Blockstore(), wantsBlock.Exchange) + wantsGetter := blockservice.New(wantsBlock.Blockstore, wantsBlock.Exchange) fetcherConfig := bsfetcher.NewFetcherConfig(wantsGetter) session := fetcherConfig.NewSession(context.Background()) ctx, cancel := context.WithTimeout(context.Background(), time.Second) @@ -206,9 +209,9 @@ func TestHelpers(t *testing.T) { na.AssembleEntry("nonlink").AssignString("zoo") }) })) - - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0*time.Millisecond)) - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + routing := mockrouting.NewServer() + net := tn.VirtualNetwork(delay.Fixed(0 * time.Millisecond)) + ig := testinstance.NewTestInstanceGenerator(net, routing, nil, nil) defer ig.Close() peers := ig.Instances(2) @@ -216,7 +219,7 @@ func TestHelpers(t *testing.T) { defer hasBlock.Exchange.Close() blocks := []blocks.Block{block1, block2, block3, block4} - err := hasBlock.Blockstore().PutMany(bg, blocks) + err := hasBlock.Blockstore.PutMany(bg, blocks) require.NoError(t, err) err = hasBlock.Exchange.NotifyNewBlocks(bg, blocks...) require.NoError(t, err) @@ -224,7 +227,7 @@ func TestHelpers(t *testing.T) { wantsBlock := peers[1] defer wantsBlock.Exchange.Close() - wantsGetter := blockservice.New(wantsBlock.Blockstore(), wantsBlock.Exchange) + wantsGetter := blockservice.New(wantsBlock.Blockstore, wantsBlock.Exchange) t.Run("Block retrieves node", func(t *testing.T) { fetcherConfig := bsfetcher.NewFetcherConfig(wantsGetter) @@ -321,8 +324,9 @@ func TestNodeReification(t *testing.T) { na.AssembleEntry("link4").AssignLink(link4) })) - net := tn.VirtualNetwork(mockrouting.NewServer(), delay.Fixed(0*time.Millisecond)) - ig := testinstance.NewTestInstanceGenerator(net, nil, nil) + routing := mockrouting.NewServer() + net := tn.VirtualNetwork(delay.Fixed(0 * time.Millisecond)) + ig := testinstance.NewTestInstanceGenerator(net, routing, nil, nil) defer ig.Close() peers := ig.Instances(2) @@ -330,7 +334,7 @@ func TestNodeReification(t *testing.T) { defer hasBlock.Exchange.Close() blocks := []blocks.Block{block2, block3, block4} - err := hasBlock.Blockstore().PutMany(bg, blocks) + err := hasBlock.Blockstore.PutMany(bg, blocks) require.NoError(t, err) err = hasBlock.Exchange.NotifyNewBlocks(bg, blocks...) require.NoError(t, err) @@ -338,7 +342,7 @@ func TestNodeReification(t *testing.T) { wantsBlock := peers[1] defer wantsBlock.Exchange.Close() - wantsGetter := blockservice.New(wantsBlock.Blockstore(), wantsBlock.Exchange) + wantsGetter := blockservice.New(wantsBlock.Blockstore, wantsBlock.Exchange) fetcherConfig := bsfetcher.NewFetcherConfig(wantsGetter) nodeReifier := func(lnkCtx ipld.LinkContext, nd ipld.Node, ls *ipld.LinkSystem) (ipld.Node, error) { return &selfLoader{Node: nd, ctx: lnkCtx.Ctx, ls: ls}, nil diff --git a/filestore/filereader.go b/filestore/filereader.go new file mode 100644 index 000000000..fba3cc942 --- /dev/null +++ b/filestore/filereader.go @@ -0,0 +1,61 @@ +package filestore + +import ( + "io" + "os" + + "golang.org/x/exp/mmap" +) + +type FileReader interface { + io.ReaderAt + io.Closer +} + +var _ FileReader = (*stdReader)(nil) + +type stdReader struct { + f *os.File +} + +// ReadAt implements the FileReader interface. 
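+// It delegates to the ReadAt of the underlying *os.File.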
+func (r *stdReader) ReadAt(p []byte, off int64) (n int, err error) { + return r.f.ReadAt(p, off) +} + +// Close implements the FileReader interface. +func (r *stdReader) Close() error { + return r.f.Close() +} + +func newStdReader(path string) (FileReader, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + return &stdReader{f: f}, nil +} + +var _ FileReader = (*mmapReader)(nil) + +type mmapReader struct { + m *mmap.ReaderAt +} + +// ReadAt implements the FileReader interface. +func (r *mmapReader) ReadAt(p []byte, off int64) (n int, err error) { + return r.m.ReadAt(p, off) +} + +// Close implements the FileReader interface. +func (r *mmapReader) Close() error { + return r.m.Close() +} + +func newMmapReader(path string) (FileReader, error) { + m, err := mmap.Open(path) + if err != nil { + return nil, err + } + return &mmapReader{m: m}, nil +} diff --git a/filestore/filestore_test.go b/filestore/filestore_test.go index 9d455193a..4d17adbe7 100644 --- a/filestore/filestore_test.go +++ b/filestore/filestore_test.go @@ -18,14 +18,14 @@ import ( var bg = context.Background() -func newTestFilestore(t *testing.T) (string, *Filestore) { +func newTestFilestore(t *testing.T, option ...Option) (string, *Filestore) { mds := ds.NewMapDatastore() testdir, err := os.MkdirTemp("", "filestore-test") if err != nil { t.Fatal(err) } - fm := NewFileManager(mds, testdir) + fm := NewFileManager(mds, testdir, option...) fm.AllowFiles = true bs := blockstore.NewBlockstore(mds) @@ -48,62 +48,74 @@ func makeFile(dir string, data []byte) (string, error) { } func TestBasicFilestore(t *testing.T) { - dir, fs := newTestFilestore(t) - - buf := make([]byte, 1000) - rand.Read(buf) - - fname, err := makeFile(dir, buf) - if err != nil { - t.Fatal(err) - } - - var cids []cid.Cid - for i := 0; i < 100; i++ { - n := &posinfo.FilestoreNode{ - PosInfo: &posinfo.PosInfo{ - FullPath: fname, - Offset: uint64(i * 10), - }, - Node: dag.NewRawNode(buf[i*10 : (i+1)*10]), - } - - err := fs.Put(bg, n) - if err != nil { - t.Fatal(err) - } - cids = append(cids, n.Node.Cid()) - } - - for i, c := range cids { - blk, err := fs.Get(bg, c) - if err != nil { - t.Fatal(err) - } - - if !bytes.Equal(blk.RawData(), buf[i*10:(i+1)*10]) { - t.Fatal("data didnt match on the way out") - } - } - - kch, err := fs.AllKeysChan(context.Background()) - if err != nil { - t.Fatal(err) - } - - out := make(map[string]struct{}) - for c := range kch { - out[c.KeyString()] = struct{}{} - } - - if len(out) != len(cids) { - t.Fatal("mismatch in number of entries") - } - - for _, c := range cids { - if _, ok := out[c.KeyString()]; !ok { - t.Fatal("missing cid: ", c) - } + cases := []struct { + name string + options []Option + }{ + {"default", nil}, + {"mmap", []Option{WithMMapReader()}}, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + dir, fs := newTestFilestore(t, c.options...) 
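+			// The rest of the test is identical for the default and mmap readers.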
+ + buf := make([]byte, 1000) + rand.Read(buf) + + fname, err := makeFile(dir, buf) + if err != nil { + t.Fatal(err) + } + + var cids []cid.Cid + for i := 0; i < 100; i++ { + n := &posinfo.FilestoreNode{ + PosInfo: &posinfo.PosInfo{ + FullPath: fname, + Offset: uint64(i * 10), + }, + Node: dag.NewRawNode(buf[i*10 : (i+1)*10]), + } + + err := fs.Put(bg, n) + if err != nil { + t.Fatal(err) + } + cids = append(cids, n.Node.Cid()) + } + + for i, c := range cids { + blk, err := fs.Get(bg, c) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(blk.RawData(), buf[i*10:(i+1)*10]) { + t.Fatal("data didnt match on the way out") + } + } + + kch, err := fs.AllKeysChan(context.Background()) + if err != nil { + t.Fatal(err) + } + + out := make(map[string]struct{}) + for c := range kch { + out[c.KeyString()] = struct{}{} + } + + if len(out) != len(cids) { + t.Fatal("mismatch in number of entries") + } + + for _, c := range cids { + if _, ok := out[c.KeyString()]; !ok { + t.Fatal("missing cid: ", c) + } + } + }) } } diff --git a/filestore/fsrefstore.go b/filestore/fsrefstore.go index 158eadf7a..eb7f190d0 100644 --- a/filestore/fsrefstore.go +++ b/filestore/fsrefstore.go @@ -25,6 +25,8 @@ import ( // FilestorePrefix identifies the key prefix for FileManager blocks. var FilestorePrefix = ds.NewKey("filestore") +type Option func(*FileManager) + // FileManager is a blockstore implementation which stores special // blocks FilestoreNode type. These nodes only contain a reference // to the actual location of the block data in the filesystem @@ -34,6 +36,7 @@ type FileManager struct { AllowUrls bool ds ds.Batching root string + makeReader func(path string) (FileReader, error) } // CorruptReferenceError implements the error interface. @@ -51,11 +54,32 @@ func (c CorruptReferenceError) Error() string { return c.Err.Error() } +// WithMMapReader sets the FileManager's reader factory to use memory-mapped file I/O. +// On Windows, when reading and writing to a file simultaneously, the system would consume +// a significant amount of memory due to caching. This memory usage is not reflected in +// the application but in the system. Using memory-mapped files (implemented with +// CreateFileMapping on Windows) avoids this issue. +func WithMMapReader() Option { + return func(f *FileManager) { + f.makeReader = newMmapReader + } +} + // NewFileManager initializes a new file manager with the given // datastore and root. All FilestoreNodes paths are relative to the // root path given here, which is prepended for any operations. 
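+// Options can be passed to customize the FileManager. A minimal sketch
+// (mds is any ds.Batching; the root path below is hypothetical):
+//
+//	fm := NewFileManager(mds, "/repo/root", WithMMapReader())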
-func NewFileManager(ds ds.Batching, root string) *FileManager {
-	return &FileManager{ds: dsns.Wrap(ds, FilestorePrefix), root: root}
+func NewFileManager(ds ds.Batching, root string, options ...Option) *FileManager {
+	f := &FileManager{
+		ds:         dsns.Wrap(ds, FilestorePrefix),
+		root:       root,
+		makeReader: newStdReader,
+	}
+
+	for _, option := range options {
+		option(f)
+	}
+
+	return f
 }
 
 // AllKeysChan returns a channel from which to read the keys stored in
@@ -175,7 +199,7 @@ func (f *FileManager) readFileDataObj(m mh.Multihash, d *pb.DataObj) ([]byte, er
 	p := filepath.FromSlash(d.GetFilePath())
 	abspath := filepath.Join(f.root, p)
 
-	fi, err := os.Open(abspath)
+	fi, err := f.makeReader(abspath)
 	if os.IsNotExist(err) {
 		return nil, &CorruptReferenceError{StatusFileNotFound, err}
 	} else if err != nil {
@@ -183,13 +207,8 @@ func (f *FileManager) readFileDataObj(m mh.Multihash, d *pb.DataObj) ([]byte, er
 	}
 	defer fi.Close()
 
-	_, err = fi.Seek(int64(d.GetOffset()), io.SeekStart)
-	if err != nil {
-		return nil, &CorruptReferenceError{StatusFileError, err}
-	}
-
 	outbuf := make([]byte, d.GetSize_())
-	_, err = io.ReadFull(fi, outbuf)
+	_, err = fi.ReadAt(outbuf, int64(d.GetOffset()))
 	if err == io.EOF || err == io.ErrUnexpectedEOF {
 		return nil, &CorruptReferenceError{StatusFileChanged, err}
 	} else if err != nil {
diff --git a/gateway/backend_car.go b/gateway/backend_car.go
index d2b33a0fc..a0a9eca2d 100644
--- a/gateway/backend_car.go
+++ b/gateway/backend_car.go
@@ -73,7 +73,7 @@ func NewCarBackend(f CarFetcher, opts ...BackendOption) (*CarBackend, error) {
 		return nil, err
 	}
 
-	var promReg prometheus.Registerer = prometheus.NewRegistry()
+	var promReg prometheus.Registerer = prometheus.DefaultRegisterer
 	if compiledOptions.promRegistry != nil {
 		promReg = compiledOptions.promRegistry
 	}
@@ -117,6 +117,11 @@ func NewRemoteCarBackend(gatewayURL []string, httpClient *http.Client, opts ...B
 }
 
 func registerCarBackendMetrics(promReg prometheus.Registerer) *CarBackendMetrics {
+	// make sure we have a functional registry
+	if promReg == nil {
+		promReg = prometheus.DefaultRegisterer
+	}
+
 	// How many CAR Fetch attempts we had? Need this to calculate % of various car request types.
 	// We only count attempts here, because success/failure with/without retries are provided by caboose:
 	// - ipfs_caboose_fetch_duration_car_success_count
@@ -129,7 +134,7 @@ func registerCarBackendMetrics(promReg prometheus.Registerer) *CarBackendMetrics
 		Name:      "car_fetch_attempts",
 		Help:      "The number of times a CAR fetch was attempted by IPFSBackend.",
 	})
-	promReg.MustRegister(carFetchAttemptMetric)
+	registerMetric(promReg, carFetchAttemptMetric)
 
 	contextAlreadyCancelledMetric := prometheus.NewCounter(prometheus.CounterOpts{
 		Namespace: "ipfs",
@@ -137,7 +142,7 @@ func registerCarBackendMetrics(promReg prometheus.Registerer) *CarBackendMetrics
 		Name:      "car_fetch_context_already_cancelled",
 		Help:      "The number of times context is already cancelled when a CAR fetch was attempted by IPFSBackend.",
 	})
-	promReg.MustRegister(contextAlreadyCancelledMetric)
+	registerMetric(promReg, contextAlreadyCancelledMetric)
 
 	// How many blocks were read via CARs?
 	// Need this as a baseline to reason about error ratio vs raw_block_recovery_attempts.
@@ -147,7 +152,7 @@ func registerCarBackendMetrics(promReg prometheus.Registerer) *CarBackendMetrics
 		Name:      "car_blocks_fetched",
 		Help:      "The number of blocks successfully read via CAR fetch.",
 	})
-	promReg.MustRegister(carBlocksFetchedMetric)
+	registerMetric(promReg, carBlocksFetchedMetric)
 
 	carParamsMetric := prometheus.NewCounterVec(prometheus.CounterOpts{
 		Namespace: "ipfs",
@@ -155,7 +160,7 @@ func registerCarBackendMetrics(promReg prometheus.Registerer) *CarBackendMetrics
 		Name:      "car_fetch_params",
 		Help:      "How many times specific CAR parameter was used during CAR data fetch.",
 	}, []string{"dagScope", "entityRanges"}) // we use 'ranges' instead of 'bytes' here because we only count the number of ranges present
-	promReg.MustRegister(carParamsMetric)
+	registerMetric(promReg, carParamsMetric)
 
 	bytesRangeStartMetric := prometheus.NewHistogram(prometheus.HistogramOpts{
 		Namespace: "ipfs",
@@ -164,7 +169,7 @@ func registerCarBackendMetrics(promReg prometheus.Registerer) *CarBackendMetrics
 		Help:      "Tracks where did the range request start.",
 		Buckets:   prometheus.ExponentialBuckets(1024, 2, 24), // 1024 bytes to 8 GiB
 	})
-	promReg.MustRegister(bytesRangeStartMetric)
+	registerMetric(promReg, bytesRangeStartMetric)
 
 	bytesRangeSizeMetric := prometheus.NewHistogram(prometheus.HistogramOpts{
 		Namespace: "ipfs",
@@ -173,7 +178,7 @@ func registerCarBackendMetrics(promReg prometheus.Registerer) *CarBackendMetrics
 		Help:      "Tracks the size of range requests.",
 		Buckets:   prometheus.ExponentialBuckets(256*1024, 2, 10), // From 256KiB to 100MiB
 	})
-	promReg.MustRegister(bytesRangeSizeMetric)
+	registerMetric(promReg, bytesRangeSizeMetric)
 
 	return &CarBackendMetrics{
 		contextAlreadyCancelledMetric,
diff --git a/gateway/blockstore.go b/gateway/blockstore.go
index 11e51b93e..07c758d05 100644
--- a/gateway/blockstore.go
+++ b/gateway/blockstore.go
@@ -7,6 +7,7 @@ import (
 	"io"
 	"math/rand"
 	"net/http"
+	"sync/atomic"
 	"time"
 
 	"github.com/ipfs/go-cid"
@@ -18,13 +19,12 @@ import (
 	lru "github.com/hashicorp/golang-lru/v2"
 	"github.com/prometheus/client_golang/prometheus"
-	uatomic "go.uber.org/atomic"
 	"go.uber.org/zap/zapcore"
 )
 
 type cacheBlockStore struct {
 	cache               *lru.TwoQueueCache[string, []byte]
-	rehash              *uatomic.Bool
+	rehash              atomic.Bool
 	cacheHitsMetric     prometheus.Counter
 	cacheRequestsMetric prometheus.Counter
 }
@@ -35,8 +35,7 @@ var _ blockstore.Blockstore = (*cacheBlockStore)(nil)
 // in memory using a two queue cache. It can be useful, for example, when paired
 // with a proxy blockstore (see [NewRemoteBlockstore]).
 //
-// If the given [prometheus.Registerer] is nil, a new one will be created using
-// [prometheus.NewRegistry].
+// If the given [prometheus.Registerer] is nil, the [prometheus.DefaultRegisterer] will be used.
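+// Metric registration failures (e.g. a metric registered twice) are logged
+// rather than fatal; see registerMetric in gateway/metrics.go.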
 func NewCacheBlockStore(size int, reg prometheus.Registerer) (blockstore.Blockstore, error) {
 	c, err := lru.New2Q[string, []byte](size)
 	if err != nil {
@@ -44,7 +43,7 @@ func NewCacheBlockStore(size int, reg prometheus.Registerer) (blockstore.Blockst
 	}
 
 	if reg == nil {
-		reg = prometheus.NewRegistry()
+		reg = prometheus.DefaultRegisterer
 	}
 
 	cacheHitsMetric := prometheus.NewCounter(prometheus.CounterOpts{
@@ -61,19 +60,11 @@ func NewCacheBlockStore(size int, reg prometheus.Registerer) (blockstore.Blockst
 		Help:      "The number of global block cache requests.",
 	})
 
-	err = reg.Register(cacheHitsMetric)
-	if err != nil {
-		return nil, err
-	}
-
-	err = reg.Register(cacheRequestsMetric)
-	if err != nil {
-		return nil, err
-	}
+	registerMetric(reg, cacheHitsMetric)
+	registerMetric(reg, cacheRequestsMetric)
 
 	return &cacheBlockStore{
 		cache:               c,
-		rehash:              uatomic.NewBool(false),
 		cacheHitsMetric:     cacheHitsMetric,
 		cacheRequestsMetric: cacheRequestsMetric,
 	}, nil
diff --git a/gateway/metrics.go b/gateway/metrics.go
index 32bb66568..29df1272d 100644
--- a/gateway/metrics.go
+++ b/gateway/metrics.go
@@ -307,3 +307,14 @@ var tracer = otel.Tracer("boxo/gateway")
 func spanTrace(ctx context.Context, spanName string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
 	return tracer.Start(ctx, "Gateway."+spanName, opts...)
 }
+
+// registerMetric registers the metric in the registry, or logs an error.
+//
+// Registration may error if the metric is already registered. We are not using
+// MustRegister here to allow people to run tests in parallel without having to
+// write tedious glue code that creates a unique registry for each unit test.
+func registerMetric(registry prometheus.Registerer, metric prometheus.Collector) {
+	if err := registry.Register(metric); err != nil {
+		log.Errorf("failed to register %v: %v", metric, err)
+	}
+}
diff --git a/go.mod b/go.mod
index a324e8b71..3cb9d498d 100644
--- a/go.mod
+++ b/go.mod
@@ -1,15 +1,19 @@
 module github.com/ipfs/boxo
 
-go 1.22
+go 1.22.0
+
+toolchain go1.22.8
 
 require (
-	github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9
+	github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b
 	github.com/benbjohnson/clock v1.3.5
 	github.com/cespare/xxhash/v2 v2.3.0
-	github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668
+	github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf
 	github.com/cskr/pubsub v1.0.2
 	github.com/dustin/go-humanize v1.0.1
-	github.com/gabriel-vasile/mimetype v1.4.4
+	github.com/gabriel-vasile/mimetype v1.4.6
+	github.com/gammazero/chanqueue v1.0.0
+	github.com/gammazero/deque v1.0.0
 	github.com/gogo/protobuf v1.3.2
 	github.com/google/uuid v1.6.0
 	github.com/gorilla/mux v1.8.1
@@ -23,60 +27,59 @@ require (
 	github.com/ipfs/go-datastore v0.6.0
 	github.com/ipfs/go-detect-race v0.0.1
 	github.com/ipfs/go-ipfs-delay v0.0.1
-	github.com/ipfs/go-ipfs-redirects-file v0.1.1
+	github.com/ipfs/go-ipfs-redirects-file v0.1.2
 	github.com/ipfs/go-ipld-format v0.6.0
 	github.com/ipfs/go-ipld-legacy v0.2.1
 	github.com/ipfs/go-log/v2 v2.5.1
 	github.com/ipfs/go-metrics-interface v0.0.1
 	github.com/ipfs/go-peertaskqueue v0.8.1
 	github.com/ipfs/go-test v0.0.4
-	github.com/ipfs/go-unixfsnode v1.9.0
+	github.com/ipfs/go-unixfsnode v1.9.2
 	github.com/ipld/go-car v0.6.2
-	github.com/ipld/go-car/v2 v2.13.1
+	github.com/ipld/go-car/v2 v2.14.2
 	github.com/ipld/go-codec-dagpb v1.6.0
 	github.com/ipld/go-ipld-prime v0.21.0
-	github.com/jbenet/goprocess v0.1.4
 	github.com/libp2p/go-buffer-pool v0.1.0
 	github.com/libp2p/go-doh-resolver v0.4.0
-	github.com/libp2p/go-libp2p
v0.36.3 - github.com/libp2p/go-libp2p-kad-dht v0.25.2 + github.com/libp2p/go-libp2p v0.37.2 + github.com/libp2p/go-libp2p-kad-dht v0.27.0 github.com/libp2p/go-libp2p-record v0.2.0 - github.com/libp2p/go-libp2p-routing-helpers v0.7.3 + github.com/libp2p/go-libp2p-routing-helpers v0.7.4 github.com/libp2p/go-libp2p-testing v0.12.0 github.com/libp2p/go-msgio v0.3.0 - github.com/miekg/dns v1.1.61 + github.com/miekg/dns v1.1.62 github.com/mr-tron/base58 v1.2.0 github.com/multiformats/go-base32 v0.1.0 github.com/multiformats/go-multiaddr v0.13.0 - github.com/multiformats/go-multiaddr-dns v0.3.1 + github.com/multiformats/go-multiaddr-dns v0.4.1 github.com/multiformats/go-multibase v0.2.0 github.com/multiformats/go-multicodec v0.9.0 github.com/multiformats/go-multihash v0.2.3 - github.com/multiformats/go-multistream v0.5.0 - github.com/pkg/errors v0.9.1 + github.com/multiformats/go-multistream v0.6.0 github.com/polydawn/refmt v0.89.0 - github.com/prometheus/client_golang v1.19.1 - github.com/samber/lo v1.39.0 + github.com/prometheus/client_golang v1.20.5 + github.com/samber/lo v1.47.0 + github.com/slok/go-http-metrics v0.12.0 github.com/spaolacci/murmur3 v1.1.0 github.com/stretchr/testify v1.9.0 github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f go.opencensus.io v0.24.0 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 - go.opentelemetry.io/otel v1.27.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 - go.opentelemetry.io/otel/exporters/zipkin v1.27.0 - go.opentelemetry.io/otel/sdk v1.27.0 - go.opentelemetry.io/otel/trace v1.27.0 - go.uber.org/atomic v1.11.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 + go.opentelemetry.io/otel v1.31.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 + go.opentelemetry.io/otel/exporters/zipkin v1.31.0 + go.opentelemetry.io/otel/sdk v1.31.0 + go.opentelemetry.io/otel/trace v1.31.0 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.27.0 - golang.org/x/oauth2 v0.21.0 - golang.org/x/sync v0.7.0 - golang.org/x/sys v0.22.0 - google.golang.org/protobuf v1.34.2 + golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c + golang.org/x/oauth2 v0.23.0 + golang.org/x/sync v0.8.0 + golang.org/x/sys v0.26.0 + google.golang.org/protobuf v1.35.1 ) require ( @@ -97,12 +100,11 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/gopacket v1.1.19 // indirect - github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect + github.com/google/pprof v0.0.0-20241017200806-017d972448fc // indirect github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c // indirect github.com/gorilla/websocket v1.5.3 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/huin/goupnp v1.3.0 // indirect @@ -115,17 +117,17 @@ require ( github.com/ipfs/go-ipld-cbor 
v0.1.0 // indirect github.com/ipfs/go-log v1.0.5 // indirect github.com/ipfs/go-merkledag v0.11.0 // indirect - github.com/ipfs/go-unixfs v0.4.5 // indirect github.com/ipfs/go-verifcid v0.0.3 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect - github.com/klauspost/compress v1.17.9 // indirect + github.com/jbenet/goprocess v0.1.4 // indirect + github.com/klauspost/compress v1.17.11 // indirect github.com/klauspost/cpuid/v2 v2.2.8 // indirect github.com/koron/go-ssdp v0.0.4 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect - github.com/libp2p/go-flow-metrics v0.1.0 // indirect + github.com/libp2p/go-flow-metrics v0.2.0 // indirect github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect - github.com/libp2p/go-libp2p-kbucket v0.6.3 // indirect + github.com/libp2p/go-libp2p-kbucket v0.6.4 // indirect github.com/libp2p/go-nat v0.2.0 // indirect github.com/libp2p/go-netroute v0.2.1 // indirect github.com/libp2p/go-reuseport v0.4.0 // indirect @@ -139,59 +141,60 @@ require ( github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect github.com/multiformats/go-varint v0.0.7 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/onsi/ginkgo/v2 v2.19.1 // indirect + github.com/onsi/ginkgo/v2 v2.20.2 // indirect github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/openzipkin/zipkin-go v0.4.3 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 // indirect - github.com/pion/datachannel v1.5.8 // indirect + github.com/pion/datachannel v1.5.9 // indirect github.com/pion/dtls/v2 v2.2.12 // indirect - github.com/pion/ice/v2 v2.3.34 // indirect - github.com/pion/interceptor v0.1.29 // indirect + github.com/pion/ice/v2 v2.3.36 // indirect + github.com/pion/interceptor v0.1.37 // indirect github.com/pion/logging v0.2.2 // indirect github.com/pion/mdns v0.0.12 // indirect github.com/pion/randutil v0.1.0 // indirect github.com/pion/rtcp v1.2.14 // indirect - github.com/pion/rtp v1.8.8 // indirect - github.com/pion/sctp v1.8.20 // indirect + github.com/pion/rtp v1.8.9 // indirect + github.com/pion/sctp v1.8.33 // indirect github.com/pion/sdp/v3 v3.0.9 // indirect github.com/pion/srtp/v2 v2.0.20 // indirect github.com/pion/stun v0.6.1 // indirect github.com/pion/transport/v2 v2.2.10 // indirect github.com/pion/turn/v2 v2.1.6 // indirect - github.com/pion/webrtc/v3 v3.3.0 // indirect + github.com/pion/webrtc/v3 v3.3.4 // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/common v0.60.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect - github.com/quic-go/qpack v0.4.0 // indirect - github.com/quic-go/quic-go v0.45.2 // indirect - github.com/quic-go/webtransport-go v0.8.0 // indirect + github.com/quic-go/qpack v0.5.1 // indirect + github.com/quic-go/quic-go v0.48.2 // indirect + github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb // indirect github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 // indirect github.com/whyrusleeping/cbor-gen v0.1.2 // 
indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect - github.com/wlynxg/anet v0.0.3 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 // indirect - go.opentelemetry.io/otel/metric v1.27.0 // indirect + github.com/wlynxg/anet v0.0.5 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 // indirect + go.opentelemetry.io/otel/metric v1.31.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect - go.uber.org/dig v1.17.1 // indirect - go.uber.org/fx v1.22.1 // indirect - go.uber.org/mock v0.4.0 // indirect - golang.org/x/crypto v0.25.0 // indirect - golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect - golang.org/x/mod v0.19.0 // indirect - golang.org/x/net v0.27.0 // indirect - golang.org/x/text v0.16.0 // indirect - golang.org/x/tools v0.23.0 // indirect - golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/dig v1.18.0 // indirect + go.uber.org/fx v1.23.0 // indirect + go.uber.org/mock v0.5.0 // indirect + golang.org/x/crypto v0.28.0 // indirect + golang.org/x/mod v0.21.0 // indirect + golang.org/x/net v0.30.0 // indirect + golang.org/x/text v0.19.0 // indirect + golang.org/x/tools v0.26.0 // indirect + golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect gonum.org/v1/gonum v0.15.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4 // indirect - google.golang.org/grpc v1.64.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 // indirect + google.golang.org/grpc v1.67.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.3.0 // indirect ) diff --git a/go.sum b/go.sum index b68267e3f..c92125e8f 100644 --- a/go.sum +++ b/go.sum @@ -10,8 +10,8 @@ git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGy github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/Jorropo/jsync v1.0.1 h1:6HgRolFZnsdfzRUj+ImB9og1JYOxQoReSywkHOGSaUU= github.com/Jorropo/jsync v1.0.1/go.mod h1:jCOZj3vrBCri3bSU3ErUYvevKlnbssrXeCivybS5ABQ= -github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs= -github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0= +github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= @@ -39,8 +39,8 @@ github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8 github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= 
-github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668 h1:ZFUue+PNxmHlu7pYv+IYMtqlaO/0VwaGEqKepZf9JpA= -github.com/crackcomm/go-gitignore v0.0.0-20231225121904-e25f5bc08668/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= +github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf h1:dwGgBWn84wUS1pVikGiruW+x5XM4amhjaZO20vCjay4= +github.com/crackcomm/go-gitignore v0.0.0-20241020182519-7843d2ba8fdf/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -75,8 +75,12 @@ github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiD github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/gabriel-vasile/mimetype v1.4.4 h1:QjV6pZ7/XZ7ryI2KuyeEDE8wnh7fHP9YnQy+R0LnH8I= -github.com/gabriel-vasile/mimetype v1.4.4/go.mod h1:JwLei5XPtWdGiMFB5Pjle1oEeoSeEuJfJE+TtfvdB/s= +github.com/gabriel-vasile/mimetype v1.4.6 h1:3+PzJTKLkvgjeTbts6msPJt4DixhT4YtFNf1gtGe3zc= +github.com/gabriel-vasile/mimetype v1.4.6/go.mod h1:JX1qVKqZd40hUPpAfiNTe0Sne7hdfKSbOqqmkq8GCXc= +github.com/gammazero/chanqueue v1.0.0 h1:FER/sMailGFA3DDvFooEkipAMU+3c9Bg3bheloPSz6o= +github.com/gammazero/chanqueue v1.0.0/go.mod h1:fMwpwEiuUgpab0sH4VHiVcEoji1pSi+EIzeG4TPeKPc= +github.com/gammazero/deque v1.0.0 h1:LTmimT8H7bXkkCy6gZX7zNLtkbz4NdS2z8LZuor3j34= +github.com/gammazero/deque v1.0.0/go.mod h1:iflpYvtGfM3U8S8j+sZEKIak3SAKYpA5/SQewgfXDKo= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= @@ -129,8 +133,8 @@ github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k= -github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= +github.com/google/pprof v0.0.0-20241017200806-017d972448fc h1:NGyrhhFhwvRAZg02jnYVg3GBQy0qGBKmFQJwaPmpmxs= +github.com/google/pprof v0.0.0-20241017200806-017d972448fc/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -147,8 +151,8 @@ github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aN github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= 
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -182,8 +186,6 @@ github.com/ipfs/go-ipfs-blockstore v1.3.1 h1:cEI9ci7V0sRNivqaOr0elDsamxXFxJMMMy7 github.com/ipfs/go-ipfs-blockstore v1.3.1/go.mod h1:KgtZyc9fq+P2xJUiCAzbRdhhqJHvsw8u2Dlqy2MyRTE= github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= -github.com/ipfs/go-ipfs-chunker v0.0.5 h1:ojCf7HV/m+uS2vhUGWcogIIxiO5ubl5O57Q7NapWLY8= -github.com/ipfs/go-ipfs-chunker v0.0.5/go.mod h1:jhgdF8vxRHycr00k13FM8Y0E+6BoalYeobXmUyTreP8= github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-ds-help v1.1.1 h1:B5UJOH52IbcfS56+Ul+sv8jnIV10lbjLF5eOO0C66Nw= @@ -194,8 +196,8 @@ github.com/ipfs/go-ipfs-exchange-offline v0.3.0 h1:c/Dg8GDPzixGd0MC8Jh6mjOwU57uY github.com/ipfs/go-ipfs-exchange-offline v0.3.0/go.mod h1:MOdJ9DChbb5u37M1IcbrRB02e++Z7521fMxqCNRrz9s= github.com/ipfs/go-ipfs-pq v0.0.3 h1:YpoHVJB+jzK15mr/xsWC574tyDLkezVrDNeaalQBsTE= github.com/ipfs/go-ipfs-pq v0.0.3/go.mod h1:btNw5hsHBpRcSSgZtiNm/SLj5gYIZ18AKtv3kERkRb4= -github.com/ipfs/go-ipfs-redirects-file v0.1.1 h1:Io++k0Vf/wK+tfnhEh63Yte1oQK5VGT2hIEYpD0Rzx8= -github.com/ipfs/go-ipfs-redirects-file v0.1.1/go.mod h1:tAwRjCV0RjLTjH8DR/AU7VYvfQECg+lpUy2Mdzv7gyk= +github.com/ipfs/go-ipfs-redirects-file v0.1.2 h1:QCK7VtL91FH17KROVVy5KrzDx2hu68QvB2FTWk08ZQk= +github.com/ipfs/go-ipfs-redirects-file v0.1.2/go.mod h1:yIiTlLcDEM/8lS6T3FlCEXZktPPqSOyuY6dEzVqw7Fw= github.com/ipfs/go-ipfs-routing v0.3.0 h1:9W/W3N+g+y4ZDeffSgqhgo7BsBSJwPMcyssET9OWevc= github.com/ipfs/go-ipfs-routing v0.3.0/go.mod h1:dKqtTFIql7e1zYsEuWLyuOU+E0WJWW8JjbTPLParDWo= github.com/ipfs/go-ipfs-util v0.0.3 h1:2RFdGez6bu2ZlZdI+rWfIdbQb1KudQp3VGwPtdNCmE0= @@ -219,16 +221,14 @@ github.com/ipfs/go-peertaskqueue v0.8.1 h1:YhxAs1+wxb5jk7RvS0LHdyiILpNmRIRnZVzte github.com/ipfs/go-peertaskqueue v0.8.1/go.mod h1:Oxxd3eaK279FxeydSPPVGHzbwVeHjatZ2GA8XD+KbPU= github.com/ipfs/go-test v0.0.4 h1:DKT66T6GBB6PsDFLoO56QZPrOmzJkqU1FZH5C9ySkew= github.com/ipfs/go-test v0.0.4/go.mod h1:qhIM1EluEfElKKM6fnWxGn822/z9knUGM1+I/OAQNKI= -github.com/ipfs/go-unixfs v0.4.5 h1:wj8JhxvV1G6CD7swACwSKYa+NgtdWC1RUit+gFnymDU= -github.com/ipfs/go-unixfs v0.4.5/go.mod h1:BIznJNvt/gEx/ooRMI4Us9K8+qeGO7vx1ohnbk8gjFg= -github.com/ipfs/go-unixfsnode v1.9.0 h1:ubEhQhr22sPAKO2DNsyVBW7YB/zA8Zkif25aBvz8rc8= -github.com/ipfs/go-unixfsnode v1.9.0/go.mod h1:HxRu9HYHOjK6HUqFBAi++7DVoWAHn0o4v/nZ/VA+0g8= +github.com/ipfs/go-unixfsnode v1.9.2 h1:0A12BYs4XOtDPJTMlwmNPlllDfqcc4yie4e919hcUXk= +github.com/ipfs/go-unixfsnode v1.9.2/go.mod h1:v1nuMFHf4QTIhFUdPMvg1nQu7AqDLvIdwyvJ531Ot1U= 
github.com/ipfs/go-verifcid v0.0.3 h1:gmRKccqhWDocCRkC+a59g5QW7uJw5bpX9HWBevXa0zs= github.com/ipfs/go-verifcid v0.0.3/go.mod h1:gcCtGniVzelKrbk9ooUSX/pM3xlH73fZZJDzQJRvOUw= github.com/ipld/go-car v0.6.2 h1:Hlnl3Awgnq8icK+ze3iRghk805lu8YNq3wlREDTF2qc= github.com/ipld/go-car v0.6.2/go.mod h1:oEGXdwp6bmxJCZ+rARSkDliTeYnVzv3++eXajZ+Bmr8= -github.com/ipld/go-car/v2 v2.13.1 h1:KnlrKvEPEzr5IZHKTXLAEub+tPrzeAFQVRlSQvuxBO4= -github.com/ipld/go-car/v2 v2.13.1/go.mod h1:QkdjjFNGit2GIkpQ953KBwowuoukoM75nP/JI1iDJdo= +github.com/ipld/go-car/v2 v2.14.2 h1:9ERr7KXpCC7If0rChZLhYDlyr6Bes6yRKPJnCO3hdHY= +github.com/ipld/go-car/v2 v2.14.2/go.mod h1:0iPB/825lTZLU2zPK5bVTk/R3V2612E1VI279OGSXWA= github.com/ipld/go-codec-dagpb v1.6.0 h1:9nYazfyu9B1p3NAgfVdpRco3Fs2nFC72DqVsMj6rOcc= github.com/ipld/go-codec-dagpb v1.6.0/go.mod h1:ANzFhfP2uMJxRBr8CE+WQWs5UsNa0pYtmKZ+agnUw9s= github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH9C2E= @@ -237,7 +237,6 @@ github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20230102063945-1a409dc236 github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20230102063945-1a409dc236dd/go.mod h1:wZ8hH8UxeryOs4kJEJaiui/s00hDSbE37OKsL47g+Sw= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= -github.com/jbenet/go-cienv v0.1.0 h1:Vc/s0QbQtoxX8MwwSLWWh+xNNZvM3Lw7NsTcHrvvhMc= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= @@ -251,8 +250,8 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= @@ -272,20 +271,20 @@ github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38y github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= github.com/libp2p/go-doh-resolver v0.4.0 h1:gUBa1f1XsPwtpE1du0O+nnZCUqtG7oYi7Bb+0S7FQqw= github.com/libp2p/go-doh-resolver v0.4.0/go.mod h1:v1/jwsFusgsWIGX/c6vCRrnJ60x7bhTiq/fs2qt0cAg= -github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= -github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= -github.com/libp2p/go-libp2p v0.36.3 h1:NHz30+G7D8Y8YmznrVZZla0ofVANrvBl2c+oARfMeDQ= -github.com/libp2p/go-libp2p v0.36.3/go.mod h1:4Y5vFyCUiJuluEPmpnKYf6WFx5ViKPUYs/ixe9ANFZ8= +github.com/libp2p/go-flow-metrics v0.2.0 h1:EIZzjmeOE6c8Dav0sNv35vhZxATIXWZg6j/C08XmmDw= 
+github.com/libp2p/go-flow-metrics v0.2.0/go.mod h1:st3qqfu8+pMfh+9Mzqb2GTiwrAGjIPszEjZmtksN8Jc= +github.com/libp2p/go-libp2p v0.37.2 h1:Irh+n9aDPTLt9wJYwtlHu6AhMUipbC1cGoJtOiBqI9c= +github.com/libp2p/go-libp2p v0.37.2/go.mod h1:M8CRRywYkqC6xKHdZ45hmqVckBj5z4mRLIMLWReypz8= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= -github.com/libp2p/go-libp2p-kad-dht v0.25.2 h1:FOIk9gHoe4YRWXTu8SY9Z1d0RILol0TrtApsMDPjAVQ= -github.com/libp2p/go-libp2p-kad-dht v0.25.2/go.mod h1:6za56ncRHYXX4Nc2vn8z7CZK0P4QiMcrn77acKLM2Oo= -github.com/libp2p/go-libp2p-kbucket v0.6.3 h1:p507271wWzpy2f1XxPzCQG9NiN6R6lHL9GiSErbQQo0= -github.com/libp2p/go-libp2p-kbucket v0.6.3/go.mod h1:RCseT7AH6eJWxxk2ol03xtP9pEHetYSPXOaJnOiD8i0= +github.com/libp2p/go-libp2p-kad-dht v0.27.0 h1:1Ea32tVTPiAfaLpPMbaBWFJgbsi/JpMqC2YBuFdf32o= +github.com/libp2p/go-libp2p-kad-dht v0.27.0/go.mod h1:ixhjLuzaXSGtWsKsXTj7erySNuVC4UP7NO015cRrF14= +github.com/libp2p/go-libp2p-kbucket v0.6.4 h1:OjfiYxU42TKQSB8t8WYd8MKhYhMJeO2If+NiuKfb6iQ= +github.com/libp2p/go-libp2p-kbucket v0.6.4/go.mod h1:jp6w82sczYaBsAypt5ayACcRJi0lgsba7o4TzJKEfWA= github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0= github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk= -github.com/libp2p/go-libp2p-routing-helpers v0.7.3 h1:u1LGzAMVRK9Nqq5aYDVOiq/HaB93U9WWczBzGyAC5ZY= -github.com/libp2p/go-libp2p-routing-helpers v0.7.3/go.mod h1:cN4mJAD/7zfPKXBcs9ze31JGYAZgzdABEm+q/hkswb8= +github.com/libp2p/go-libp2p-routing-helpers v0.7.4 h1:6LqS1Bzn5CfDJ4tzvP9uwh42IB7TJLNFJA6dEeGBv84= +github.com/libp2p/go-libp2p-routing-helpers v0.7.4/go.mod h1:we5WDj9tbolBXOuF1hGOkR+r7Uh1408tQbAKaT5n1LE= github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg= github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0= @@ -308,8 +307,8 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs= -github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ= +github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= +github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= @@ -334,8 +333,8 @@ github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y9 github.com/multiformats/go-multiaddr v0.13.0 h1:BCBzs61E3AGHcYYTv8dqRH43ZfyrqM8RXVPT8t13tLQ= github.com/multiformats/go-multiaddr v0.13.0/go.mod h1:sBXrNzucqkFJhvKOiwwLyqamGa/P5EIXNPLovyhQCII= github.com/multiformats/go-multiaddr-dns v0.3.0/go.mod h1:mNzQ4eTGDg0ll1N9jKPOUogZPoJ30W8a7zk66FQPpdQ= 
-github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= -github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= +github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M= +github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= @@ -345,8 +344,8 @@ github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI1 github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= -github.com/multiformats/go-multistream v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf/QGkvOGQAyiE= -github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA= +github.com/multiformats/go-multistream v0.6.0 h1:ZaHKbsL404720283o4c/IHQXiS6gb8qAN5EIJ4PN5EA= +github.com/multiformats/go-multistream v0.6.0/go.mod h1:MOyoG5otO24cHIg8kf9QW2/NozURlkP/rvi2FQJyCPg= github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= @@ -354,10 +353,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= -github.com/onsi/ginkgo/v2 v2.19.1 h1:QXgq3Z8Crl5EL1WBAC98A5sEBHARrAJNzAmMxzLcRF0= -github.com/onsi/ginkgo/v2 v2.19.1/go.mod h1:O3DtEWQkPa/F7fBMgmZQKKsluAy8pd3rEQdrjkPb9zA= -github.com/onsi/gomega v1.34.0 h1:eSSPsPNp6ZpsG8X1OVmOTxig+CblTc4AxpPBykhe2Os= -github.com/onsi/gomega v1.34.0/go.mod h1:MIKI8c+f+QLWk+hxbePD4i0LMJSExPaZOVfkoex4cAo= +github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4= +github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag= +github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= +github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -370,15 +369,15 @@ github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2D github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 h1:1/WtZae0yGtPq+TI6+Tv1WTxkukpXeMlviSxvL7SRgk= github.com/petar/GoLLRB 
v0.0.0-20210522233825-ae3b015fd3e9/go.mod h1:x3N5drFsm2uilKKuuYo6LdyD8vZAW55sH/9w+pbo1sw= -github.com/pion/datachannel v1.5.8 h1:ph1P1NsGkazkjrvyMfhRBUAWMxugJjq2HfQifaOoSNo= -github.com/pion/datachannel v1.5.8/go.mod h1:PgmdpoaNBLX9HNzNClmdki4DYW5JtI7Yibu8QzbL3tI= +github.com/pion/datachannel v1.5.9 h1:LpIWAOYPyDrXtU+BW7X0Yt/vGtYxtXQ8ql7dFfYUVZA= +github.com/pion/datachannel v1.5.9/go.mod h1:kDUuk4CU4Uxp82NH4LQZbISULkX/HtzKa4P7ldf9izE= github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk= github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= -github.com/pion/ice/v2 v2.3.34 h1:Ic1ppYCj4tUOcPAp76U6F3fVrlSw8A9JtRXLqw6BbUM= -github.com/pion/ice/v2 v2.3.34/go.mod h1:mBF7lnigdqgtB+YHkaY/Y6s6tsyRyo4u4rPGRuOjUBQ= -github.com/pion/interceptor v0.1.29 h1:39fsnlP1U8gw2JzOFWdfCU82vHvhW9o0rZnZF56wF+M= -github.com/pion/interceptor v0.1.29/go.mod h1:ri+LGNjRUc5xUNtDEPzfdkmSqISixVTBF/z/Zms/6T4= +github.com/pion/ice/v2 v2.3.36 h1:SopeXiVbbcooUg2EIR8sq4b13RQ8gzrkkldOVg+bBsc= +github.com/pion/ice/v2 v2.3.36/go.mod h1:mBF7lnigdqgtB+YHkaY/Y6s6tsyRyo4u4rPGRuOjUBQ= +github.com/pion/interceptor v0.1.37 h1:aRA8Zpab/wE7/c0O3fh1PqY0AJI3fCSEM5lRWJVorwI= +github.com/pion/interceptor v0.1.37/go.mod h1:JzxbJ4umVTlZAf+/utHzNesY8tmRkM2lVmkS82TTj8Y= github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= github.com/pion/mdns v0.0.12 h1:CiMYlY+O0azojWDmxdNr7ADGrnZ+V6Ilfner+6mSVK8= @@ -389,10 +388,10 @@ github.com/pion/rtcp v1.2.12/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9 github.com/pion/rtcp v1.2.14 h1:KCkGV3vJ+4DAJmvP0vaQShsb0xkRfWkO540Gy102KyE= github.com/pion/rtcp v1.2.14/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4= github.com/pion/rtp v1.8.3/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= -github.com/pion/rtp v1.8.8 h1:EtYFHI0rpUEjT/RMnGfb1vdJhbYmPG77szD72uUnSxs= -github.com/pion/rtp v1.8.8/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= -github.com/pion/sctp v1.8.20 h1:sOc3lkV/tQaP57ZUEXIMdM2V92IIB2ia5v/ygnBxaEg= -github.com/pion/sctp v1.8.20/go.mod h1:oTxw8i5m+WbDHZJL/xUpe6CPIn1Y0GIKKwTLF4h53H8= +github.com/pion/rtp v1.8.9 h1:E2HX740TZKaqdcPmf4pw6ZZuG8u5RlMMt+l3dxeu6Wk= +github.com/pion/rtp v1.8.9/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= +github.com/pion/sctp v1.8.33 h1:dSE4wX6uTJBcNm8+YlMg7lw1wqyKHggsP5uKbdj+NZw= +github.com/pion/sctp v1.8.33/go.mod h1:beTnqSzewI53KWoG3nqB282oDMGrhNxBdb+JZnkCwRM= github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY= github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M= github.com/pion/srtp/v2 v2.0.20 h1:HNNny4s+OUmG280ETrCdgFndp4ufx3/uy85EawYEhTk= @@ -405,13 +404,13 @@ github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLh github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q= github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E= github.com/pion/transport/v3 v3.0.1/go.mod h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9SzK5f5xE0= -github.com/pion/transport/v3 v3.0.6 h1:k1mQU06bmmX143qSWgXFqSH1KUJceQvIUuVH/K5ELWw= -github.com/pion/transport/v3 v3.0.6/go.mod h1:HvJr2N/JwNJAfipsRleqwFoR3t/pWyHeZUs89v3+t5s= +github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0= +github.com/pion/transport/v3 v3.0.7/go.mod 
h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo= github.com/pion/turn/v2 v2.1.3/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= github.com/pion/turn/v2 v2.1.6 h1:Xr2niVsiPTB0FPtt+yAWKFUkU1eotQbGgpTIld4x1Gc= github.com/pion/turn/v2 v2.1.6/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= -github.com/pion/webrtc/v3 v3.3.0 h1:Rf4u6n6U5t5sUxhYPQk/samzU/oDv7jk6BA5hyO2F9I= -github.com/pion/webrtc/v3 v3.3.0/go.mod h1:hVmrDJvwhEertRWObeb1xzulzHGeVUoPlWvxdGzcfU0= +github.com/pion/webrtc/v3 v3.3.4 h1:v2heQVnXTSqNRXcaFQVOhIOYkLMxOu1iJG8uy1djvkk= +github.com/pion/webrtc/v3 v3.3.4/go.mod h1:liNa+E1iwyzyXqNUwvoMRNQ10x8h8FOeJKL8RkIbamE= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -420,33 +419,33 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA= +github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= -github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= -github.com/quic-go/quic-go v0.45.2 h1:DfqBmqjb4ExSdxRIb/+qXhPC+7k6+DUNZha4oeiC9fY= -github.com/quic-go/quic-go v0.45.2/go.mod h1:1dLehS7TIR64+vxGR70GDcatWTOtMX2PUtnKsjbTurI= -github.com/quic-go/webtransport-go v0.8.0 h1:HxSrwun11U+LlmwpgM1kEqIqH90IT4N8auv/cD7QFJg= -github.com/quic-go/webtransport-go v0.8.0/go.mod h1:N99tjprW432Ut5ONql/aUhSLT0YVSlwHohQsuac9WaM= +github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= +github.com/quic-go/qpack v0.5.1/go.mod 
h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= +github.com/quic-go/quic-go v0.48.2 h1:wsKXZPeGWpMpCGSWqOcqpW2wZYic/8T3aqiOID0/KWE= +github.com/quic-go/quic-go v0.48.2/go.mod h1:yBgs3rWBOADpga7F+jJsb6Ybg1LSYiQvwWlLX+/6HMs= +github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 h1:4WFk6u3sOT6pLa1kQ50ZVdm8BQFgJNA117cepZxtLIg= +github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66/go.mod h1:Vp72IJajgeOL6ddqrAhmp7IM9zbTcgkQxD/YdxrVwMw= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/samber/lo v1.39.0 h1:4gTz1wUhNYLhFSKl6O+8peW0v2F4BCY034GRpU9WnuA= -github.com/samber/lo v1.39.0/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA= +github.com/samber/lo v1.47.0 h1:z7RynLwP5nbyRscyvcD043DWYoOcYRv3mV8lBeqOCLc= +github.com/samber/lo v1.47.0/go.mod h1:RmDH9Ct32Qy3gduHQuKJ3gW1fMHAnE/fAzQuf6He5cU= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= @@ -472,6 +471,8 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/slok/go-http-metrics v0.12.0 h1:mAb7hrX4gB4ItU6NkFoKYdBslafg3o60/HbGBRsKaG8= +github.com/slok/go-http-metrics v0.12.0/go.mod h1:Ee/mdT9BYvGrlGzlClkK05pP2hRHmVbRF9dtUVS8LNA= github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs= github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= @@ -497,8 +498,6 @@ github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= -github.com/tj/assert v0.0.3 h1:Df/BlaZ20mq6kuai7f5z2TvPFiwC3xaWJSDQNiIS3Rk= -github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk= github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb h1:Ywfo8sUltxogBpFuMOFRrrSifO788kAFxmvVw31PtQQ= github.com/ucarion/urlpath 
v0.0.0-20200424170820-7ccc79b76bbb/go.mod h1:ikPs9bRWicNw3S7XpJ8sK/smGwU9WcSVU3dy9qahYBM= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= @@ -519,8 +518,9 @@ github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= -github.com/wlynxg/anet v0.0.3 h1:PvR53psxFXstc12jelG6f1Lv4MWqE0tI76/hHGjh9rg= github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= +github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU= +github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= @@ -528,41 +528,41 @@ github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5t go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0= -go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= -go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 h1:/0YaXu3755A/cFbtXp+21lkXgI0QE5avTWA2HjU9/WE= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0/go.mod h1:m7SFxp0/7IxmJPLIY3JhOcU9CoFzDaCPL6xxQIxhA+o= -go.opentelemetry.io/otel/exporters/zipkin v1.27.0 h1:aXcxb7F6ZDC1o2Z52LDfS2g6M2FB5CrxdR2gzY4QRNs= -go.opentelemetry.io/otel/exporters/zipkin v1.27.0/go.mod h1:+WMURoi4KmVB7ypbFPx3xtZTWen2Ca3lRK9u6DVTO5M= -go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= -go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= -go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= -go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= -go.opentelemetry.io/otel/trace v1.27.0 
h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= -go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM= +go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= +go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 h1:lUsI2TYsQw2r1IASwoROaCnjdj2cvC2+Jbxvk6nHnWU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0/go.mod h1:2HpZxxQurfGxJlJDblybejHB6RX6pmExPNe517hREw4= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 h1:UGZ1QwZWY67Z6BmckTU+9Rxn04m2bD3gD6Mk0OIOCPk= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0/go.mod h1:fcwWuDuaObkkChiDlhEpSq9+X1C0omv+s5mBtToAQ64= +go.opentelemetry.io/otel/exporters/zipkin v1.31.0 h1:CgucL0tj3717DJnni7HVVB2wExzi8c2zJNEA2BhLMvI= +go.opentelemetry.io/otel/exporters/zipkin v1.31.0/go.mod h1:rfzOVNiSwIcWtEC2J8epwG26fiaXlYvLySJ7bwsrtAE= +go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= +go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= +go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= +go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= +go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= +go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= -go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= -go.uber.org/fx v1.22.1 h1:nvvln7mwyT5s1q201YE29V/BFrGor6vMiDNpU/78Mys= -go.uber.org/fx v1.22.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48= +go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw= +go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.23.0 h1:lIr/gYWQGfTwGcSXWXu4vP5Ws6iqnNEIY+F/aFzCKTg= +go.uber.org/fx v1.23.0/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= 
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= -go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= +go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU= +go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -587,11 +587,11 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= -golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= +golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= +golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY= +golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -605,8 +605,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= -golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -630,14 +630,14 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.27.0 
h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -649,8 +649,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -677,8 +677,8 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -694,8 +694,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.12.0/go.mod 
h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= @@ -719,14 +719,14 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= -golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= +golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gonum.org/v1/gonum v0.15.0 h1:2lYxjRbTYyxkJxlhC+LvJIx3SsANPdRybu1tGj9/OrQ= gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= @@ -743,10 +743,10 @@ google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 h1:MuYw1wJzT+ZkybKfaOXKp5hJiZDn2iHaXRw0mRYdHSc= -google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4/go.mod h1:px9SlOOZBg1wM1zdnr8jEL4CNGUBZ+ZKYtNPApNQc4c= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4 h1:Di6ANFilr+S60a4S61ZM00vLdw0IrQOSMS2/6mrnOU0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= 
+google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 h1:T6rh4haD3GVYsgEfWExoCZA2o2FmbNyKpTuAxbEFPTg= +google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:wp2WsuBYj6j8wUdo3ToZsdxxixbvQNAHqVJrTgi5E5M= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9 h1:QCqS/PdaHTSWGvupk2F/ehwHtGc0/GYkT+3GAcR1CCc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -755,8 +755,8 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= -google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= +google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= +google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -766,8 +766,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/ipld/merkledag/merkledag.go b/ipld/merkledag/merkledag.go index a227780ff..1c638d139 100644 --- a/ipld/merkledag/merkledag.go +++ b/ipld/merkledag/merkledag.go @@ -6,6 +6,7 @@ import ( "errors" "sync" + "github.com/gammazero/deque" bserv "github.com/ipfs/boxo/blockservice" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" @@ -535,7 +536,7 @@ func parallelWalkDepth(ctx context.Context, getLinks GetLinks, root cid.Cid, vis defer close(feed) send := feed - var todoQueue []cidDepth + var todoQueue deque.Deque[cidDepth] var inProgress int next := cidDepth{ @@ -547,9 +548,8 @@ func parallelWalkDepth(ctx context.Context, 
getLinks GetLinks, root cid.Cid, vis select { case send <- next: inProgress++ - if len(todoQueue) > 0 { - next = todoQueue[0] - todoQueue = todoQueue[1:] + if todoQueue.Len() > 0 { + next = todoQueue.PopFront() } else { next = cidDepth{} send = nil @@ -570,7 +570,7 @@ func parallelWalkDepth(ctx context.Context, getLinks GetLinks, root cid.Cid, vis next = cd send = feed } else { - todoQueue = append(todoQueue, cd) + todoQueue.PushBack(cd) } } case err := <-errChan: diff --git a/ipld/merkledag/traverse/traverse.go b/ipld/merkledag/traverse/traverse.go index a3836e385..125e5d7db 100644 --- a/ipld/merkledag/traverse/traverse.go +++ b/ipld/merkledag/traverse/traverse.go @@ -5,6 +5,7 @@ import ( "context" "errors" + "github.com/gammazero/deque" ipld "github.com/ipfs/go-ipld-format" ) @@ -167,10 +168,10 @@ func bfsTraverse(root State, t *traversal) error { return err } - var q queue - q.enq(root) - for q.len() > 0 { - curr := q.deq() + var q deque.Deque[State] + q.PushBack(root) + for q.Len() > 0 { + curr := q.PopFront() if curr.Node == nil { return errors.New("failed to dequeue though queue not empty") } @@ -189,7 +190,7 @@ func bfsTraverse(root State, t *traversal) error { continue } - q.enq(State{ + q.PushBack(State{ Node: node, Depth: curr.Depth + 1, }) @@ -197,24 +198,3 @@ func bfsTraverse(root State, t *traversal) error { } return nil } - -type queue struct { - s []State -} - -func (q *queue) enq(n State) { - q.s = append(q.s, n) -} - -func (q *queue) deq() State { - if len(q.s) < 1 { - return State{} - } - n := q.s[0] - q.s = q.s[1:] - return n -} - -func (q *queue) len() int { - return len(q.s) -} diff --git a/ipld/unixfs/hamt/hamt.go b/ipld/unixfs/hamt/hamt.go index 455d070c6..41bf7dd14 100644 --- a/ipld/unixfs/hamt/hamt.go +++ b/ipld/unixfs/hamt/hamt.go @@ -29,10 +29,10 @@ import ( "os" "sync" + "github.com/gammazero/deque" + dag "github.com/ipfs/boxo/ipld/merkledag" format "github.com/ipfs/boxo/ipld/unixfs" "github.com/ipfs/boxo/ipld/unixfs/internal" - - dag "github.com/ipfs/boxo/ipld/merkledag" bitfield "github.com/ipfs/go-bitfield" cid "github.com/ipfs/go-cid" ipld "github.com/ipfs/go-ipld-format" @@ -563,7 +563,7 @@ func parallelShardWalk(ctx context.Context, root *Shard, dserv ipld.DAGService, } send := feed - var todoQueue []*listCidsAndShards + var todoQueue deque.Deque[*listCidsAndShards] var inProgress int next := &listCidsAndShards{ @@ -575,9 +575,8 @@ dispatcherLoop: select { case send <- next: inProgress++ - if len(todoQueue) > 0 { - next = todoQueue[0] - todoQueue = todoQueue[1:] + if todoQueue.Len() > 0 { + next = todoQueue.PopFront() } else { next = nil send = nil @@ -592,7 +591,7 @@ dispatcherLoop: next = nextNodes send = feed } else { - todoQueue = append(todoQueue, nextNodes) + todoQueue.PushBack(nextNodes) } case <-errGrpCtx.Done(): break dispatcherLoop diff --git a/ipns/pb/record.pb.go b/ipns/pb/record.pb.go index 1f0effbd8..f6eeeef89 100644 --- a/ipns/pb/record.pb.go +++ b/ipns/pb/record.pb.go @@ -1,17 +1,16 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 -// protoc v3.21.12 +// protoc-gen-go v1.35.1 +// protoc v5.28.2 // source: record.proto package pb import ( - reflect "reflect" - sync "sync" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) const ( @@ -64,29 +63,30 @@ func (IpnsRecord_ValidityType) EnumDescriptor() ([]byte, []int) { return file_record_proto_rawDescGZIP(), []int{0, 0} } +// https://specs.ipfs.tech/ipns/ipns-record/#record-serialization-format type IpnsRecord struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // 1-6 are legacy fields used only in V1+V2 records Value []byte `protobuf:"bytes,1,opt,name=value,proto3,oneof" json:"value,omitempty"` SignatureV1 []byte `protobuf:"bytes,2,opt,name=signatureV1,proto3,oneof" json:"signatureV1,omitempty"` ValidityType *IpnsRecord_ValidityType `protobuf:"varint,3,opt,name=validityType,proto3,enum=github.com.boxo.ipns.pb.IpnsRecord_ValidityType,oneof" json:"validityType,omitempty"` Validity []byte `protobuf:"bytes,4,opt,name=validity,proto3,oneof" json:"validity,omitempty"` Sequence *uint64 `protobuf:"varint,5,opt,name=sequence,proto3,oneof" json:"sequence,omitempty"` Ttl *uint64 `protobuf:"varint,6,opt,name=ttl,proto3,oneof" json:"ttl,omitempty"` - PubKey []byte `protobuf:"bytes,7,opt,name=pubKey,proto3,oneof" json:"pubKey,omitempty"` - SignatureV2 []byte `protobuf:"bytes,8,opt,name=signatureV2,proto3,oneof" json:"signatureV2,omitempty"` - Data []byte `protobuf:"bytes,9,opt,name=data,proto3,oneof" json:"data,omitempty"` + // 7-9 are V2 records + PubKey []byte `protobuf:"bytes,7,opt,name=pubKey,proto3,oneof" json:"pubKey,omitempty"` + SignatureV2 []byte `protobuf:"bytes,8,opt,name=signatureV2,proto3,oneof" json:"signatureV2,omitempty"` + Data []byte `protobuf:"bytes,9,opt,name=data,proto3,oneof" json:"data,omitempty"` } func (x *IpnsRecord) Reset() { *x = IpnsRecord{} - if protoimpl.UnsafeEnabled { - mi := &file_record_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_record_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *IpnsRecord) String() string { @@ -97,7 +97,7 @@ func (*IpnsRecord) ProtoMessage() {} func (x *IpnsRecord) ProtoReflect() protoreflect.Message { mi := &file_record_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -227,15 +227,12 @@ func file_record_proto_rawDescGZIP() []byte { return file_record_proto_rawDescData } -var ( - file_record_proto_enumTypes = make([]protoimpl.EnumInfo, 1) - file_record_proto_msgTypes = make([]protoimpl.MessageInfo, 1) - file_record_proto_goTypes = []interface{}{ - (IpnsRecord_ValidityType)(0), // 0: github.com.boxo.ipns.pb.IpnsRecord.ValidityType - (*IpnsRecord)(nil), // 1: github.com.boxo.ipns.pb.IpnsRecord - } -) - +var file_record_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_record_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_record_proto_goTypes = []any{ + (IpnsRecord_ValidityType)(0), // 0: github.com.boxo.ipns.pb.IpnsRecord.ValidityType + (*IpnsRecord)(nil), // 1: github.com.boxo.ipns.pb.IpnsRecord +} var file_record_proto_depIdxs = []int32{ 0, // 0: github.com.boxo.ipns.pb.IpnsRecord.validityType:type_name -> 
github.com.boxo.ipns.pb.IpnsRecord.ValidityType
 	1, // [1:1] is the sub-list for method output_type
@@ -250,21 +247,7 @@ func file_record_proto_init() {
 	if File_record_proto != nil {
 		return
 	}
-	if !protoimpl.UnsafeEnabled {
-		file_record_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*IpnsRecord); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-	}
-	file_record_proto_msgTypes[0].OneofWrappers = []interface{}{}
+	file_record_proto_msgTypes[0].OneofWrappers = []any{}
 	type x struct{}
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
diff --git a/mfs/repub.go b/mfs/repub.go
index 463810414..0ad0d33a2 100644
--- a/mfs/repub.go
+++ b/mfs/repub.go
@@ -2,43 +2,47 @@ package mfs

 import (
 	"context"
+	"errors"
+	"sync"
 	"time"

 	cid "github.com/ipfs/go-cid"
 )

+// closeTimeout is how long to wait for current publishing to finish before
+// shutting down the republisher.
+const closeTimeout = 5 * time.Second
+
 // PubFunc is the user-defined function that determines exactly what
 // logic entails "publishing" a `Cid` value.
 type PubFunc func(context.Context, cid.Cid) error

 // Republisher manages when to publish a given entry.
 type Republisher struct {
-	TimeoutLong  time.Duration
-	TimeoutShort time.Duration
-	RetryTimeout time.Duration
-	pubfunc      PubFunc
-
+	pubfunc          PubFunc
 	update           chan cid.Cid
 	immediatePublish chan chan struct{}

-	ctx    context.Context
-	cancel func()
+	cancel    func()
+	closeOnce sync.Once
+	stopped   chan struct{}
 }

 // NewRepublisher creates a new Republisher object to republish the given root
 // using the given short and long time intervals.
-func NewRepublisher(ctx context.Context, pf PubFunc, tshort, tlong time.Duration) *Republisher {
-	ctx, cancel := context.WithCancel(ctx)
-	return &Republisher{
-		TimeoutShort: tshort,
-		TimeoutLong:  tlong,
-		RetryTimeout: tlong,
+func NewRepublisher(pf PubFunc, tshort, tlong time.Duration, lastPublished cid.Cid) *Republisher {
+	ctx, cancel := context.WithCancel(context.Background())
+	rp := &Republisher{
 		update:           make(chan cid.Cid, 1),
 		pubfunc:          pf,
 		immediatePublish: make(chan chan struct{}),
-		ctx:              ctx,
 		cancel:           cancel,
+		stopped:          make(chan struct{}),
 	}
+
+	go rp.run(ctx, tshort, tlong, lastPublished)
+
+	return rp
 }

 // WaitPub waits for the current value to be published (or returns early
@@ -58,10 +62,22 @@ func (rp *Republisher) WaitPub(ctx context.Context) error {
 	}
 }

+// Close tells the republisher to stop and waits for it to stop.
 func (rp *Republisher) Close() error {
-	// TODO(steb): Wait for `Run` to stop
-	err := rp.WaitPub(rp.ctx)
-	rp.cancel()
+	var err error
+	rp.closeOnce.Do(func() {
+		// Wait a short amount of time for any current publishing to finish.
+		ctx, cancel := context.WithTimeout(context.Background(), closeTimeout)
+		err = rp.WaitPub(ctx)
+		if errors.Is(err, context.DeadlineExceeded) {
+			err = errors.New("mfs/republisher: timed out waiting to publish during close")
+		}
+		cancel()
+		// Shut down the publisher.
+		rp.cancel()
+	})
+	// Wait for the publisher to stop and then return.
+	<-rp.stopped
 	return err
 }

@@ -82,22 +98,28 @@ func (rp *Republisher) Update(c cid.Cid) {
 	}
 }

 // Run contains the core logic of the `Republisher`. It calls the user-defined
-// `pubfunc` function whenever the `Cid` value is updated to a *new* value. The
-// complexity comes from the fact that `pubfunc` may be slow so we need to batch
-// updates.
+// `pubfunc` function whenever the `Cid` value is updated to a *new* value.
+// Since calling the `pubfunc` may be slow, updates are batched.
 //
 // Algorithm:
-// 1. When we receive the first update after publishing, we set a `longer` timer.
-// 2. When we receive any update, we reset the `quick` timer.
-// 3. If either the `quick` timeout or the `longer` timeout elapses,
-//    we call `publish` with the latest updated value.
+// 1. When receiving the first update after publishing, set a `longer` timer
+// 2. When receiving any update, reset the `quick` timer
+// 3. If either the `quick` timeout or the `longer` timeout elapses, call
+//    `publish` with the latest updated value.
+//
+// The `longer` timer ensures that publishing is delayed by at most that
+// duration. The `quick` timer allows publishing sooner if there are no more
+// updates available.
 //
-// The `longer` timer ensures that we delay publishing by at most
-// `TimeoutLong`. The `quick` timer allows us to publish sooner if
-// it looks like there are no more updates coming down the pipe.
+// In other words, the quick timeout means there are no more values to put into
+// the "batch", so do update. The long timeout means that the "batch" is full,
+// so do update, even though values are still arriving (no quick timeout yet).
 //
-// Note: If a publish fails, we retry repeatedly every TimeoutRetry.
-func (rp *Republisher) Run(lastPublished cid.Cid) {
+// If a publish fails, retry repeatedly every `longer` timeout.
+func (rp *Republisher) run(ctx context.Context, timeoutShort, timeoutLong time.Duration, lastPublished cid.Cid) {
+	defer close(rp.stopped)
+
 	quick := time.NewTimer(0)
 	if !quick.Stop() {
 		<-quick.C
 	}
@@ -107,12 +129,13 @@
 		<-longer.C
 	}

+	immediatePublish := rp.immediatePublish
 	var toPublish cid.Cid
-	for rp.ctx.Err() == nil {
-		var waiter chan struct{}
+	var waiter chan struct{}
+	for {
 		select {
-		case <-rp.ctx.Done():
+		case <-ctx.Done():
 			return
 		case newValue := <-rp.update:
 			// Skip already published values.
@@ -123,19 +146,20 @@
 				break
 			}

-			// If we aren't already waiting to publish something,
-			// reset the long timeout.
+			// If not already waiting to publish something, reset the long
+			// timeout.
 			if !toPublish.Defined() {
-				longer.Reset(rp.TimeoutLong)
+				longer.Reset(timeoutLong)
 			}

 			// Always reset the short timeout.
-			quick.Reset(rp.TimeoutShort)
+			quick.Reset(timeoutShort)

 			// Finally, set the new value to publish.
 			toPublish = newValue
+			// Wait for a newer value or the quick timer.
 			continue
-		case waiter = <-rp.immediatePublish:
+		case waiter = <-immediatePublish:
 			// Make sure to grab the *latest* value to publish.
 			select {
 			case toPublish = <-rp.update:
@@ -147,51 +171,62 @@
 				toPublish = cid.Undef
 			}
 		case <-quick.C:
+			// Waited a short time for more updates and none were received.
 		case <-longer.C:
+			// Updates are still arriving, but it is time to publish what has
+			// been received so far.
 		}

 		// Cleanup, publish, and close waiters.
-		// 1. Stop any timers. Don't use the `if !t.Stop() { ... }`
-		// idiom as these timers may not be running.
-
+		// 1. Stop any timers.
 		quick.Stop()
+		longer.Stop()
+
+		// Do not use the `if !t.Stop() { ... }` idiom as these timers may not
+		// be running.
+		//
+		// TODO: remove after Go 1.23 is required.
 		select {
 		case <-quick.C:
 		default:
 		}
-
-		longer.Stop()
 		select {
 		case <-longer.C:
 		default:
 		}

-		// 2. If we have a value to publish, publish it now.
+		// 2. If there is a value to publish, then publish it now.
 		if toPublish.Defined() {
-			for {
-				err := rp.pubfunc(rp.ctx, toPublish)
-				if err == nil {
-					break
-				}
-				// Keep retrying until we succeed or we abort.
-				// TODO(steb): We could try pulling new values
-				// off `update` but that's not critical (and
-				// complicates this code a bit). We'll pull off
-				// a new value on the next loop through.
-				select {
-				case <-time.After(rp.RetryTimeout):
-				case <-rp.ctx.Done():
-					return
-				}
+			err := rp.pubfunc(ctx, toPublish)
+			if err != nil {
+				// Republish failed, so retry after waiting for long timeout.
+				//
+				// Instead of entering a retry loop here, go back to waiting
+				// for more values and retrying to publish after the long
+				// timeout. Keep using the current waiter until it has been
+				// notified of a successful publish.
+				//
+				// Reset the long timer as it effectively becomes the retry
+				// timeout.
+				longer.Reset(timeoutLong)
+				// Stop reading waiters from immediatePublish while retrying.
+				// This causes the current waiter to be notified only after a
+				// successful call to pubfunc, and is what constitutes a retry.
+				immediatePublish = nil
+				continue
 			}
 			lastPublished = toPublish
 			toPublish = cid.Undef
+			// Resume reading waiters.
+			immediatePublish = rp.immediatePublish
 		}

-		// 3. Trigger anything waiting in `WaitPub`.
+		// 3. Notify anything waiting in `WaitPub` on a successful call to
+		// pubfunc, or if there is nothing to publish.
 		if waiter != nil {
 			close(waiter)
+			waiter = nil
 		}
 	}
 }
diff --git a/mfs/repub_test.go b/mfs/repub_test.go
index 6be5624ab..bb6385019 100644
--- a/mfs/repub_test.go
+++ b/mfs/repub_test.go
@@ -7,6 +7,7 @@ import (

 	cid "github.com/ipfs/go-cid"
 	ci "github.com/libp2p/go-libp2p-testing/ci"
+	"github.com/stretchr/testify/require"
 )

 func TestRepublisher(t *testing.T) {
@@ -14,12 +15,14 @@ func TestRepublisher(t *testing.T) {
 		t.Skip("dont run timing tests in CI")
 	}

-	ctx := context.TODO()
-
 	pub := make(chan struct{})
 	pf := func(ctx context.Context, c cid.Cid) error {
-		pub <- struct{}{}
+		select {
+		case pub <- struct{}{}:
+		case <-ctx.Done():
+			return ctx.Err()
+		}
 		return nil
 	}
@@ -29,8 +32,7 @@ func TestRepublisher(t *testing.T) {
 	tshort := time.Millisecond * 50
 	tlong := time.Second / 2

-	rp := NewRepublisher(ctx, pf, tshort, tlong)
-	go rp.Run(cid.Undef)
+	rp := NewRepublisher(pf, tshort, tlong, cid.Undef)

 	rp.Update(testCid1)
@@ -41,16 +43,17 @@
 	select {
 	case <-pub:
 	}

-	cctx, cancel := context.WithCancel(context.Background())
-
+	stopUpdates := make(chan struct{})
 	go func() {
+		timer := time.NewTimer(time.Hour)
+		defer timer.Stop()
 		for {
 			rp.Update(testCid2)
-			time.Sleep(time.Millisecond * 10)
+			timer.Reset(time.Millisecond * 10)
 			select {
-			case <-cctx.Done():
+			case <-timer.C:
+			case <-stopUpdates:
 				return
-			default:
 			}
 		}
 	}()
@@ -66,10 +69,33 @@
 		t.Fatal("waited too long for pub!")
 	}

-	cancel()
+	close(stopUpdates)

-	err := rp.Close()
-	if err != nil {
-		t.Fatal(err)
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
+	defer cancel()
+
+	// Check that republishing an update does not call pubfunc again.
+	rp.Update(testCid2)
+	err := rp.WaitPub(context.Background())
+	require.NoError(t, err)
+	select {
+	case <-pub:
+		t.Fatal("pub func called again with repeated update")
+	case <-time.After(tlong * 2):
 	}
+
+	// Check that WaitPub times out when a blocked pubfunc is called.
+	rp.Update(testCid1)
+	err = rp.WaitPub(ctx)
+	require.ErrorIs(t, err, context.DeadlineExceeded)
+
+	// Unblock pubfunc.
+	<-pub
+
+	err = rp.Close()
+	require.NoError(t, err)
+
+	// Check that an additional call to Close is OK after the republisher stopped.
+	err = rp.Close()
+	require.NoError(t, err)
 }
diff --git a/mfs/root.go b/mfs/root.go
index 5a7cb7ed1..e584b6e06 100644
--- a/mfs/root.go
+++ b/mfs/root.go
@@ -64,6 +64,11 @@ const (
 	TDir
 )

+const (
+	repubQuick = 300 * time.Millisecond
+	repubLong  = 3 * time.Second
+)
+
 // FSNode abstracts the `Directory` and `File` structures, it represents
 // any child node in the MFS (i.e., all the nodes besides the `Root`). It
 // is the counterpart of the `parent` interface which represents any
@@ -100,12 +105,7 @@ type Root struct {
 func NewRoot(parent context.Context, ds ipld.DAGService, node *dag.ProtoNode, pf PubFunc) (*Root, error) {
 	var repub *Republisher
 	if pf != nil {
-		repub = NewRepublisher(parent, pf, time.Millisecond*300, time.Second*3)
-
-		// No need to take the lock here since we just created
-		// the `Republisher` and no one has access to it yet.
-
-		go repub.Run(node.Cid())
+		repub = NewRepublisher(pf, repubQuick, repubLong, node.Cid())
 	}

 	root := &Root{
@@ -177,10 +177,7 @@ func (kr *Root) FlushMemFree(ctx context.Context) error {
 	dir.lock.Lock()
 	defer dir.lock.Unlock()

-	for name := range dir.entriesCache {
-		delete(dir.entriesCache, name)
-	}
-	// TODO: Can't we just create new maps?
+	clear(dir.entriesCache)

 	return nil
 }
diff --git a/namesys/republisher/repub.go b/namesys/republisher/repub.go
index 7ca2ae932..95e440436 100644
--- a/namesys/republisher/repub.go
+++ b/namesys/republisher/repub.go
@@ -16,8 +16,6 @@ import (
 	"github.com/ipfs/boxo/ipns"
 	ds "github.com/ipfs/go-datastore"
 	logging "github.com/ipfs/go-log/v2"
-	"github.com/jbenet/goprocess"
-	gpctx "github.com/jbenet/goprocess/context"
 	ic "github.com/libp2p/go-libp2p/core/crypto"
 	"github.com/libp2p/go-libp2p/core/peer"
 )
@@ -67,8 +65,17 @@ func NewRepublisher(ns namesys.Publisher, ds ds.Datastore, self ic.PrivKey, ks k
 	}
 }

-// Run starts the republisher facility. It can be stopped by stopping the provided proc.
-func (rp *Republisher) Run(proc goprocess.Process) {
+// Run starts the republisher facility. It can be stopped by calling the returned function.
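+//
+// A minimal usage sketch (rp here is any constructed *Republisher, mirroring
+// the updated tests below):
+//
+//	stop := rp.Run()
+//	defer stop()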
+func (rp *Republisher) Run() func() { + ctx, cancel := context.WithCancel(context.Background()) + go rp.run(ctx) + return func() { + log.Debug("stopping republisher") + cancel() + } +} + +func (rp *Republisher) run(ctx context.Context) { timer := time.NewTimer(InitialRebroadcastDelay) defer timer.Stop() if rp.Interval < InitialRebroadcastDelay { @@ -79,21 +86,21 @@ func (rp *Republisher) Run(proc goprocess.Process) { select { case <-timer.C: timer.Reset(rp.Interval) - err := rp.republishEntries(proc) + err := rp.republishEntries(ctx) if err != nil { log.Info("republisher failed to republish: ", err) if FailureRetryInterval < rp.Interval { timer.Reset(FailureRetryInterval) } } - case <-proc.Closing(): + case <-ctx.Done(): return } } } -func (rp *Republisher) republishEntries(p goprocess.Process) error { - ctx, cancel := context.WithCancel(gpctx.OnClosingContext(p)) +func (rp *Republisher) republishEntries(ctx context.Context) error { + ctx, cancel := context.WithCancel(ctx) defer cancel() ctx, span := startSpan(ctx, "Republisher.RepublishEntries") defer span.End() diff --git a/namesys/republisher/repub_test.go b/namesys/republisher/repub_test.go index 88ec04dae..db42b02a9 100644 --- a/namesys/republisher/repub_test.go +++ b/namesys/republisher/repub_test.go @@ -6,7 +6,6 @@ import ( "testing" "time" - "github.com/jbenet/goprocess" "github.com/libp2p/go-libp2p" dht "github.com/libp2p/go-libp2p-kad-dht" ic "github.com/libp2p/go-libp2p/core/crypto" @@ -125,8 +124,8 @@ func TestRepublish(t *testing.T) { repub.Interval = time.Second repub.RecordLifetime = time.Second * 5 - proc := goprocess.Go(repub.Run) - defer proc.Close() + stop := repub.Run() + defer stop() // now wait a couple seconds for it to fire time.Sleep(time.Second * 2) @@ -182,8 +181,8 @@ func TestLongEOLRepublish(t *testing.T) { repub.Interval = time.Millisecond * 500 repub.RecordLifetime = time.Second - proc := goprocess.Go(repub.Run) - defer proc.Close() + stop := repub.Run() + defer stop() // now wait a couple seconds for it to fire a few times time.Sleep(time.Second * 2) diff --git a/pinning/pinner/dspinner/pin.go b/pinning/pinner/dspinner/pin.go index bc1f61902..ddc93c2c5 100644 --- a/pinning/pinner/dspinner/pin.go +++ b/pinning/pinner/dspinner/pin.go @@ -707,11 +707,19 @@ func (p *pinner) streamIndex(ctx context.Context, index dsindex.Indexer, detaile defer p.lock.RUnlock() cidSet := cid.NewSet() + send := func(sp ipfspinner.StreamedPin) (ok bool) { + select { + case <-ctx.Done(): + return false + case out <- sp: + return true + } + } err := index.ForEach(ctx, "", func(key, value string) bool { c, err := cid.Cast([]byte(key)) if err != nil { - out <- ipfspinner.StreamedPin{Err: err} + send(ipfspinner.StreamedPin{Err: err}) return false } @@ -719,7 +727,7 @@ func (p *pinner) streamIndex(ctx context.Context, index dsindex.Indexer, detaile if detailed { pp, err := p.loadPin(ctx, value) if err != nil { - out <- ipfspinner.StreamedPin{Err: err} + send(ipfspinner.StreamedPin{Err: err}) return false } @@ -731,17 +739,16 @@ func (p *pinner) streamIndex(ctx context.Context, index dsindex.Indexer, detaile } if !cidSet.Has(c) { - select { - case <-ctx.Done(): + if !send(ipfspinner.StreamedPin{Pin: pin}) { return false - case out <- ipfspinner.StreamedPin{Pin: pin}: } cidSet.Add(c) } return true }) if err != nil { - out <- ipfspinner.StreamedPin{Err: err} + send(ipfspinner.StreamedPin{Err: err}) + return } }() diff --git a/pinning/remote/client/client.go b/pinning/remote/client/client.go index e1da64ffb..3b6270093 100644 --- 
a/pinning/remote/client/client.go +++ b/pinning/remote/client/client.go @@ -2,18 +2,16 @@ package go_pinning_service_http_client import ( "context" + "errors" "fmt" "net/http" "time" - "github.com/pkg/errors" - "github.com/ipfs/boxo/pinning/remote/client/openapi" "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" "github.com/multiformats/go-multiaddr" "github.com/multiformats/go-multibase" - - logging "github.com/ipfs/go-log/v2" ) var logger = logging.Logger("pinning-service-http-client") @@ -139,77 +137,90 @@ func (pinLsOpts) LsMeta(meta map[string]string) LsOption { type pinResults = openapi.PinResults -func (c *Client) Ls(ctx context.Context, opts ...LsOption) (chan PinStatusGetter, chan error) { - res := make(chan PinStatusGetter, 1) - errs := make(chan error, 1) - +// Ls writes pin statuses to the PinStatusGetter channel. The channel is +// closed when there are no more pins. If an error occurs or ctx is canceled, +// then the channel is closed and an error is returned. +// +// Example: +// +// res := make(chan PinStatusGetter, 1) +// lsErr := make(chan error, 1) +// go func() { +// lsErr <- c.Ls(ctx, res, opts...) +// }() +// for r := range res { +// processPin(r) +// } +// return <-lsErr +func (c *Client) Ls(ctx context.Context, res chan<- PinStatusGetter, opts ...LsOption) (err error) { settings := new(lsSettings) for _, o := range opts { - if err := o(settings); err != nil { + if err = o(settings); err != nil { close(res) - errs <- err - close(errs) - return res, errs + return err } } - go func() { - defer func() { - if r := recover(); r != nil { - var err error - switch x := r.(type) { - case string: - err = fmt.Errorf("unexpected error while listing remote pins: %s", x) - case error: - err = fmt.Errorf("unexpected error while listing remote pins: %w", x) - default: - err = errors.New("unknown panic while listing remote pins") - } - errs <- err - } - close(errs) - close(res) - }() - - for { - pinRes, err := c.lsInternal(ctx, settings) - if err != nil { - errs <- err - return + defer func() { + if r := recover(); r != nil { + switch x := r.(type) { + case string: + err = fmt.Errorf("unexpected error while listing remote pins: %s", x) + case error: + err = fmt.Errorf("unexpected error while listing remote pins: %w", x) + default: + err = errors.New("unknown panic while listing remote pins") } + } + close(res) + }() - results := pinRes.GetResults() - for _, r := range results { - select { - case res <- &pinStatusObject{r}: - case <-ctx.Done(): - errs <- ctx.Err() - return - } - } + for { + pinRes, err := c.lsInternal(ctx, settings) + if err != nil { + return err + } - batchSize := len(results) - if int(pinRes.Count) == batchSize { - // no more batches - return + results := pinRes.GetResults() + for _, r := range results { + select { + case res <- &pinStatusObject{r}: + case <-ctx.Done(): + return ctx.Err() } + } - // Better DX/UX for cases like https://github.com/application-research/estuary/issues/124 - if batchSize == 0 && int(pinRes.Count) != 0 { - errs <- fmt.Errorf("invalid pinning service response: PinResults.count=%d but no PinResults.results", int(pinRes.Count)) - return - } + batchSize := len(results) + if int(pinRes.Count) == batchSize { + // no more batches + return nil + } - oldestResult := results[batchSize-1] - settings.before = &oldestResult.Created + // Better DX/UX for cases like https://github.com/application-research/estuary/issues/124 + if batchSize == 0 && int(pinRes.Count) != 0 { + return fmt.Errorf("invalid pinning service response: 
PinResults.count=%d but no PinResults.results", int(pinRes.Count)) } + + oldestResult := results[batchSize-1] + settings.before = &oldestResult.Created + } +} + +// GoLs creates the results and error channels, starts the goroutine that calls +// Ls, and returns the channels to the caller. +func (c *Client) GoLs(ctx context.Context, opts ...LsOption) (<-chan PinStatusGetter, <-chan error) { + res := make(chan PinStatusGetter) + errs := make(chan error, 1) + + go func() { + errs <- c.Ls(ctx, res, opts...) }() return res, errs } func (c *Client) LsSync(ctx context.Context, opts ...LsOption) ([]PinStatusGetter, error) { - resCh, errCh := c.Ls(ctx, opts...) + resCh, errCh := c.GoLs(ctx, opts...) var res []PinStatusGetter for r := range resCh { @@ -221,8 +232,6 @@ func (c *Client) LsSync(ctx context.Context, opts ...LsOption) ([]PinStatusGette // Manual version of Ls that returns a single batch of results and an int with the total count func (c *Client) LsBatchSync(ctx context.Context, opts ...LsOption) ([]PinStatusGetter, int, error) { - var res []PinStatusGetter - settings := new(lsSettings) for _, o := range opts { if err := o(settings); err != nil { @@ -235,9 +244,13 @@ func (c *Client) LsBatchSync(ctx context.Context, opts ...LsOption) ([]PinStatus return nil, 0, err } + var res []PinStatusGetter results := pinRes.GetResults() - for _, r := range results { - res = append(res, &pinStatusObject{r}) + if len(results) != 0 { + res = make([]PinStatusGetter, len(results)) + for i, r := range results { + res[i] = &pinStatusObject{r} + } } return res, int(pinRes.Count), nil @@ -276,8 +289,7 @@ func (c *Client) lsInternal(ctx context.Context, settings *lsSettings) (pinResul // TODO: Ignoring HTTP Response OK? results, httpresp, err := getter.Execute() if err != nil { - err := httperr(httpresp, err) - return pinResults{}, err + return pinResults{}, httperr(httpresp, err) } return results, nil @@ -418,13 +430,13 @@ func httperr(resp *http.Response, e error) error { if ok { ferr, ok := oerr.Model().(openapi.Failure) if ok { - return errors.Wrapf(e, "reason: %q, details: %q", ferr.Error.GetReason(), ferr.Error.GetDetails()) + return fmt.Errorf("reason: %q, details: %q: %w", ferr.Error.GetReason(), ferr.Error.GetDetails(), e) } } if resp == nil { - return errors.Wrapf(e, "empty response from remote pinning service") + return fmt.Errorf("empty response from remote pinning service: %w", e) } - return errors.Wrapf(e, "remote pinning service returned http error %d", resp.StatusCode) + return fmt.Errorf("remote pinning service returned http error %d: %w", resp.StatusCode, e) } diff --git a/pinning/remote/client/openapi/README.md b/pinning/remote/client/openapi/README.md index eddae08b7..7c5c3ae0f 100644 --- a/pinning/remote/client/openapi/README.md +++ b/pinning/remote/client/openapi/README.md @@ -1,7 +1,5 @@ # Go API client for openapi - - ## About this spec The IPFS Pinning Service API is intended to be an implementation-agnostic API: - For use and implementation by pinning service providers @@ -15,8 +13,10 @@ This section describes the most important object types and conventions. A full list of fields and schemas can be found in the `schemas` section of the [YAML file](https://github.com/ipfs/pinning-services-api-spec/blob/master/ipfs-pinning-service.yaml). ## Identifiers + ### cid [Content Identifier (CID)](https://docs.ipfs.io/concepts/content-addressing/) points at the root of a DAG that is pinned recursively. + ### requestid Unique identifier of a pin request.
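Looking back at the `GoLs` helper added above (before this README diff): it owns the channel plumbing that `Ls` callers would otherwise write by hand. A hedged usage sketch, assuming a configured `*Client` named `c`:

```go
// GoLs starts Ls in a goroutine and hands back both channels.
resCh, errCh := c.GoLs(ctx)
for status := range resCh {
	fmt.Println(status) // each PinStatusGetter as it streams in
}
// errCh receives exactly one value once resCh is closed.
if err := <-errCh; err != nil {
	return err
}
```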
@@ -25,6 +25,7 @@ When a pin is created, the service responds with unique `requestid` that can be Service implementation should use UUID, `hash(accessToken,Pin,PinStatus.created)`, or any other opaque identifier that provides equally strong protection against race conditions. ## Objects + ### Pin object ![pin object](https://bafybeideck2fchyxna4wqwc2mo67yriokehw3yujboc5redjdaajrk2fjq.ipfs.dweb.link/pin.png) @@ -46,7 +47,7 @@ It includes the original `pin` object, along with the current `status` and globa ### Creating a new pin object The user sends a `Pin` object to `POST /pins` and receives a `PinStatus` response: -- `requestid` in `PinStatus` is the identifier of the pin operation, which can can be used for checking status, and removing the pin in the future +- `requestid` in `PinStatus` is the identifier of the pin operation, which can be used for checking status, and removing the pin in the future - `status` in `PinStatus` indicates the current state of a pin ### Checking status of in-progress pinning @@ -60,7 +61,6 @@ The user can replace an existing pin object via `POST /pins/{requestid}`. This i ### Removing a pin object A pin object can be removed via `DELETE /pins/{requestid}`. - ## Provider hints Pinning of new data can be accelerated by providing a list of known data sources in `Pin.origins`, and connecting at least one of them to pinning service nodes at `PinStatus.delegates`. @@ -99,8 +99,6 @@ Pin objects can be listed by executing `GET /pins` with optional parameters: > **Note**: pagination by the `created` timestamp requires each value to be globally unique. Any future considerations to add support for bulk creation must account for this. - - ## Overview This API client was generated by the [OpenAPI Generator](https://openapi-generator.tech) project. By using the [OpenAPI-spec](https://www.openapis.org/) from a remote server, you can easily generate an API client. 
@@ -177,7 +175,6 @@ Class | Method | HTTP request | Description *PinsApi* | [**PinsRequestidGet**](docs/PinsApi.md#pinsrequestidget) | **Get** /pins/{requestid} | Get pin object *PinsApi* | [**PinsRequestidPost**](docs/PinsApi.md#pinsrequestidpost) | **Post** /pins/{requestid} | Replace pin object - ## Documentation For Models - [Failure](docs/Failure.md) @@ -190,11 +187,8 @@ Class | Method | HTTP request | Description ## Documentation For Authorization - - ### accessToken - ## Documentation for Utility Methods Due to the fact that model structure members are all pointers, this package contains @@ -212,6 +206,3 @@ Each of these functions takes a value of the given basic type and returns a poin * `PtrTime` ## Author - - - diff --git a/provider/noop.go b/provider/noop.go index 5367ccb30..50c3e3502 100644 --- a/provider/noop.go +++ b/provider/noop.go @@ -19,7 +19,7 @@ func (op *noopProvider) Close() error { return nil } -func (op *noopProvider) Provide(cid.Cid) error { +func (op *noopProvider) Provide(context.Context, cid.Cid, bool) error { return nil } diff --git a/provider/provider.go b/provider/provider.go index a20a805cb..4197f3dae 100644 --- a/provider/provider.go +++ b/provider/provider.go @@ -18,7 +18,7 @@ var logR = logging.Logger("reprovider.simple") // Provider announces blocks to the network type Provider interface { // Provide takes a cid and makes an attempt to announce it to the network - Provide(cid.Cid) error + Provide(context.Context, cid.Cid, bool) error } // Reprovider reannounces blocks to the network diff --git a/provider/reprovider.go b/provider/reprovider.go index 1f3e1580a..048a2067d 100644 --- a/provider/reprovider.go +++ b/provider/reprovider.go @@ -183,6 +183,17 @@ func DatastorePrefix(k datastore.Key) Option { } } +// MaxBatchSize limits how big each batch is. +// Some content routers, like the acceleratedDHTClient, scale sublinearly, so +// bigger batches are faster per element; smaller batch sizes, however, can +// limit memory usage spikes. +func MaxBatchSize(n uint) Option { + return func(system *reprovider) error { + system.maxReprovideBatchSize = n + return nil + } +} + // ThroughputReport will fire the callback synchronously once at least limit // multihashes have been advertised; it will then wait until a new set of at least // limit multihashes has been advertised.
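A usage sketch for the new `MaxBatchSize` option, combined with the new three-argument `Provide` from this hunk (`dstore`, `ctx`, and `c` are assumed caller-side values):

```go
// dstore is whatever datastore the caller already has (assumed).
sys, err := provider.New(dstore, provider.MaxBatchSize(1024))
if err != nil {
	return err
}
defer sys.Close()

// Provide now takes a context and an announce flag (per this diff).
if err := sys.Provide(ctx, c, true); err != nil {
	return err
}
```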
@@ -444,7 +455,7 @@ func (s *reprovider) Close() error { return err } -func (s *reprovider) Provide(cid cid.Cid) error { +func (s *reprovider) Provide(ctx context.Context, cid cid.Cid, announce bool) error { return s.q.Enqueue(cid) } diff --git a/provider/reprovider_test.go b/provider/reprovider_test.go index 4ae58148e..ceb72f97b 100644 --- a/provider/reprovider_test.go +++ b/provider/reprovider_test.go @@ -198,7 +198,7 @@ func TestOfflineRecordsThenOnlineRepublish(t *testing.T) { sys, err := New(ds) assert.NoError(t, err) - err = sys.Provide(c) + err = sys.Provide(context.Background(), c, true) assert.NoError(t, err) err = sys.Close() diff --git a/routing/http/client/client.go b/routing/http/client/client.go index 16840cab5..e52079f7b 100644 --- a/routing/http/client/client.go +++ b/routing/http/client/client.go @@ -9,12 +9,15 @@ import ( "io" "mime" "net/http" + gourl "net/url" + "sort" "strings" "time" "github.com/benbjohnson/clock" ipns "github.com/ipfs/boxo/ipns" "github.com/ipfs/boxo/routing/http/contentrouter" + "github.com/ipfs/boxo/routing/http/filters" "github.com/ipfs/boxo/routing/http/internal/drjson" "github.com/ipfs/boxo/routing/http/types" "github.com/ipfs/boxo/routing/http/types/iter" @@ -30,6 +33,8 @@ import ( var ( _ contentrouter.Client = &Client{} logger = logging.Logger("routing/http/client") + + DefaultProtocolFilter = []string{"unknown", "transport-bitswap"} // IPIP-484 ) const ( @@ -52,6 +57,11 @@ type Client struct { // for testing, e.g., testing the server with a mangled signature. //lint:ignore SA1019 // ignore staticcheck afterSignCallback func(req *types.WriteBitswapRecord) + + // disableLocalFiltering is used to disable local filtering of the results + disableLocalFiltering bool + protocolFilter []string + addrFilter []string } // defaultUserAgent is used as a fallback to inform HTTP server which library @@ -83,6 +93,37 @@ func WithIdentity(identity crypto.PrivKey) Option { } } +// WithDisabledLocalFiltering disables local filtering of the results. +// This should be used for delegated routing servers that already implement filtering. +func WithDisabledLocalFiltering(val bool) Option { + return func(c *Client) error { + c.disableLocalFiltering = val + return nil + } +} + +// WithProtocolFilter adds a protocol filter to the client. +// The protocol filter is added to the request URL. +// The protocols are ordered alphabetically for cache key (URL) consistency. +func WithProtocolFilter(protocolFilter []string) Option { + return func(c *Client) error { + sort.Strings(protocolFilter) + c.protocolFilter = protocolFilter + return nil + } +} + +// WithAddrFilter adds an address filter to the client. +// The address filter is added to the request URL. +// The addresses are ordered alphabetically for cache key (URL) consistency. +func WithAddrFilter(addrFilter []string) Option { + return func(c *Client) error { + sort.Strings(addrFilter) + c.addrFilter = addrFilter + return nil + } +} + // WithHTTPClient sets a custom HTTP Client to be used with [Client]. func WithHTTPClient(h httpClient) Option { return func(c *Client) error { @@ -136,10 +177,11 @@ func WithStreamResultsRequired() Option { // The Provider and identity parameters are optional. If they are nil, the [client.ProvideBitswap] method will not function.
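Taken together, the filter options above shape both the `/routing/v1` request URL and the local post-filtering of results. A hedged construction sketch before the `New` constructor that follows (the endpoint URL is a placeholder):

```go
c, err := client.New("https://delegated-ipfs.dev",
	// Override DefaultProtocolFilter; values are sorted for stable cache keys.
	client.WithProtocolFilter([]string{"transport-bitswap", "unknown"}),
	// Keep only results with ip4/ip6 multiaddrs.
	client.WithAddrFilter([]string{"ip4", "ip6"}),
	// Uncomment if the server is known to filter per IPIP-484 already:
	// client.WithDisabledLocalFiltering(true),
)
if err != nil {
	return err
}
_ = c
```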
func New(baseURL string, opts ...Option) (*Client, error) { client := &Client{ - baseURL: baseURL, - httpClient: newDefaultHTTPClient(defaultUserAgent), - clock: clock.New(), - accepts: strings.Join([]string{mediaTypeNDJSON, mediaTypeJSON}, ","), + baseURL: baseURL, + httpClient: newDefaultHTTPClient(defaultUserAgent), + clock: clock.New(), + accepts: strings.Join([]string{mediaTypeNDJSON, mediaTypeJSON}, ","), + protocolFilter: DefaultProtocolFilter, // can be customized via WithProtocolFilter } for _, opt := range opts { @@ -184,7 +226,12 @@ func (c *Client) FindProviders(ctx context.Context, key cid.Cid) (providers iter // TODO test measurements m := newMeasurement("FindProviders") - url := c.baseURL + "/routing/v1/providers/" + key.String() + url, err := gourl.JoinPath(c.baseURL, "routing/v1/providers", key.String()) + if err != nil { + return nil, err + } + url = filters.AddFiltersToURL(url, c.protocolFilter, c.addrFilter) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) if err != nil { return nil, err @@ -251,6 +298,10 @@ func (c *Client) FindProviders(ctx context.Context, key cid.Cid) (providers iter return nil, errors.New("unknown content type") } + if !c.disableLocalFiltering { + it = filters.ApplyFiltersToIter(it, c.addrFilter, c.protocolFilter) + } + return &measuringIter[iter.Result[types.Record]]{Iter: it, ctx: ctx, m: m}, nil } @@ -356,7 +407,12 @@ func (c *Client) provideSignedBitswapRecord(ctx context.Context, bswp *types.Wri func (c *Client) FindPeers(ctx context.Context, pid peer.ID) (peers iter.ResultIter[*types.PeerRecord], err error) { m := newMeasurement("FindPeers") - url := c.baseURL + "/routing/v1/peers/" + peer.ToCid(pid).String() + url, err := gourl.JoinPath(c.baseURL, "routing/v1/peers", peer.ToCid(pid).String()) + if err != nil { + return nil, err + } + url = filters.AddFiltersToURL(url, c.protocolFilter, c.addrFilter) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) if err != nil { return nil, err @@ -423,6 +479,10 @@ func (c *Client) FindPeers(ctx context.Context, pid peer.ID) (peers iter.ResultI return nil, errors.New("unknown content type") } + if !c.disableLocalFiltering { + it = filters.ApplyFiltersToPeerRecordIter(it, c.addrFilter, c.protocolFilter) + } + return &measuringIter[iter.Result[*types.PeerRecord]]{Iter: it, ctx: ctx, m: m}, nil } diff --git a/routing/http/client/client_test.go b/routing/http/client/client_test.go index 732861797..da36f666f 100644 --- a/routing/http/client/client_test.go +++ b/routing/http/client/client_test.go @@ -49,7 +49,8 @@ func (m *mockContentRouter) FindPeers(ctx context.Context, pid peer.ID, limit in func (m *mockContentRouter) GetIPNS(ctx context.Context, name ipns.Name) (*ipns.Record, error) { args := m.Called(ctx, name) - return args.Get(0).(*ipns.Record), args.Error(1) + rec, _ := args.Get(0).(*ipns.Record) + return rec, args.Error(1) } func (m *mockContentRouter) PutIPNS(ctx context.Context, name ipns.Name, record *ipns.Record) error { @@ -158,12 +159,12 @@ func addrsToDRAddrs(addrs []multiaddr.Multiaddr) (drmas []types.Multiaddr) { return } -func makePeerRecord() types.PeerRecord { +func makePeerRecord(protocols []string) types.PeerRecord { peerID, addrs, _ := makeProviderAndIdentity() return types.PeerRecord{ Schema: types.SchemaPeer, ID: &peerID, - Protocols: []string{"transport-bitswap"}, + Protocols: protocols, Addrs: addrsToDRAddrs(addrs), Extra: map[string]json.RawMessage{}, } @@ -196,7 +197,7 @@ func makeProviderAndIdentity() (peer.ID, []multiaddr.Multiaddr, 
crypto.PrivKey) panic(err) } - ma2, err := multiaddr.NewMultiaddr("/ip4/0.0.0.0/tcp/4002") + ma2, err := multiaddr.NewMultiaddr("/ip4/0.0.0.0/udp/4002") if err != nil { panic(err) } @@ -222,9 +223,22 @@ func (e *osErrContains) errContains(t *testing.T, err error) { } func TestClient_FindProviders(t *testing.T) { - peerRecord := makePeerRecord() + unknownPeerRecord := makePeerRecord([]string{}) + bitswapPeerRecord := makePeerRecord([]string{"transport-bitswap"}) + httpPeerRecord := makePeerRecord([]string{"transport-ipfs-gateway-http"}) + fooPeerRecord := makePeerRecord([]string{"transport-foo"}) + peerProviders := []iter.Result[types.Record]{ - {Val: &peerRecord}, + {Val: &unknownPeerRecord}, + {Val: &bitswapPeerRecord}, + {Val: &httpPeerRecord}, + {Val: &fooPeerRecord}, + } + + // DefaultProtocolFilter + defaultFilterPeerProviders := []iter.Result[types.Record]{ + {Val: &unknownPeerRecord}, + {Val: &bitswapPeerRecord}, } bitswapRecord := makeBitswapRecord() @@ -238,6 +252,7 @@ func TestClient_FindProviders(t *testing.T) { routerErr error clientRequiresStreaming bool serverStreamingDisabled bool + filterProtocols []string expErrContains osErrContains expResult []iter.Result[types.Record] @@ -245,11 +260,25 @@ func TestClient_FindProviders(t *testing.T) { expJSONResponse bool }{ { - name: "happy case", + name: "happy case with DefaultProtocolFilter", + routerResult: peerProviders, + expResult: defaultFilterPeerProviders, + expStreamingResponse: true, + }, + { + name: "pass through with protocol filter disabled", + filterProtocols: []string{}, routerResult: peerProviders, expResult: peerProviders, expStreamingResponse: true, }, + { + name: "happy case with custom protocol filter", + filterProtocols: []string{"transport-foo"}, + routerResult: peerProviders, + expResult: []iter.Result[types.Record]{{Val: &fooPeerRecord}}, + expStreamingResponse: true, + }, { name: "happy case (with deprecated bitswap schema)", routerResult: []iter.Result[types.Record]{{Val: &bitswapRecord}}, @@ -259,7 +288,7 @@ func TestClient_FindProviders(t *testing.T) { { name: "server doesn't support streaming", routerResult: peerProviders, - expResult: peerProviders, + expResult: defaultFilterPeerProviders, serverStreamingDisabled: true, expJSONResponse: true, }, @@ -305,6 +334,10 @@ func TestClient_FindProviders(t *testing.T) { }) } + if c.filterProtocols != nil { + clientOpts = append(clientOpts, WithProtocolFilter(c.filterProtocols)) + } + if c.expStreamingResponse { onRespReceived = append(onRespReceived, func(r *http.Response) { assert.Equal(t, mediaTypeNDJSON, r.Header.Get("Content-Type")) @@ -482,11 +515,25 @@ func TestClient_Provide(t *testing.T) { } func TestClient_FindPeers(t *testing.T) { - peerRecord := makePeerRecord() + unknownPeerRecord := makePeerRecord([]string{}) + bitswapPeerRecord := makePeerRecord([]string{"transport-bitswap"}) + httpPeerRecord := makePeerRecord([]string{"transport-ipfs-gateway-http"}) + fooPeerRecord := makePeerRecord([]string{"transport-foo"}) + peerRecords := []iter.Result[*types.PeerRecord]{ - {Val: &peerRecord}, + {Val: &unknownPeerRecord}, + {Val: &bitswapPeerRecord}, + {Val: &httpPeerRecord}, + {Val: &fooPeerRecord}, + } + + // DefaultProtocolFilter + defaultFilterPeerRecords := []iter.Result[*types.PeerRecord]{ + {Val: &unknownPeerRecord}, + {Val: &bitswapPeerRecord}, } - pid := *peerRecord.ID + + pid := *bitswapPeerRecord.ID cases := []struct { name string @@ -496,6 +543,7 @@ func TestClient_FindPeers(t *testing.T) { routerErr error clientRequiresStreaming bool 
serverStreamingDisabled bool + filterProtocols []string expErrContains osErrContains expResult []iter.Result[*types.PeerRecord] @@ -503,15 +551,29 @@ func TestClient_FindPeers(t *testing.T) { expJSONResponse bool }{ { - name: "happy case", + name: "happy case with DefaultProtocolFilter", + routerResult: peerRecords, + expResult: defaultFilterPeerRecords, + expStreamingResponse: true, + }, + { + name: "pass through with protocol filter disabled", + filterProtocols: []string{}, routerResult: peerRecords, expResult: peerRecords, expStreamingResponse: true, }, + { + name: "happy case with custom protocol filter", + filterProtocols: []string{"transport-foo"}, + routerResult: peerRecords, + expResult: []iter.Result[*types.PeerRecord]{{Val: &fooPeerRecord}}, + expStreamingResponse: true, + }, { name: "server doesn't support streaming", routerResult: peerRecords, - expResult: peerRecords, + expResult: defaultFilterPeerRecords, serverStreamingDisabled: true, expJSONResponse: true, }, @@ -542,12 +604,10 @@ func TestClient_FindPeers(t *testing.T) { } for _, c := range cases { t.Run(c.name, func(t *testing.T) { - var ( - clientOpts []Option - serverOpts []server.Option - onRespReceived []func(*http.Response) - onReqReceived []func(*http.Request) - ) + var clientOpts []Option + var serverOpts []server.Option + var onRespReceived []func(*http.Response) + var onReqReceived []func(*http.Request) if c.serverStreamingDisabled { serverOpts = append(serverOpts, server.WithStreamingResultsDisabled()) @@ -560,6 +620,10 @@ func TestClient_FindPeers(t *testing.T) { }) } + if c.filterProtocols != nil { + clientOpts = append(clientOpts, WithProtocolFilter(c.filterProtocols)) + } + if c.expStreamingResponse { onRespReceived = append(onRespReceived, func(r *http.Response) { assert.Equal(t, mediaTypeNDJSON, r.Header.Get("Content-Type")) @@ -603,7 +667,7 @@ func TestClient_FindPeers(t *testing.T) { resultIter, err := client.FindPeers(ctx, pid) c.expErrContains.errContains(t, err) - results := iter.ReadAll[iter.Result[*types.PeerRecord]](resultIter) + results := iter.ReadAll(resultIter) assert.Equal(t, c.expResult, results) }) } diff --git a/routing/http/server/filters.go b/routing/http/filters/filters.go similarity index 75% rename from routing/http/server/filters.go rename to routing/http/filters/filters.go index bb5dfa0d5..122f625de 100644 --- a/routing/http/server/filters.go +++ b/routing/http/filters/filters.go @@ -1,24 +1,54 @@ -package server +package filters import ( + "net/url" "reflect" "slices" "strings" "github.com/ipfs/boxo/routing/http/types" "github.com/ipfs/boxo/routing/http/types/iter" + logging "github.com/ipfs/go-log/v2" "github.com/multiformats/go-multiaddr" ) -// filters implements IPIP-0484 +var logger = logging.Logger("routing/http/filters") -func parseFilter(param string) []string { +// Package filters implements IPIP-0484 + +func ParseFilter(param string) []string { if param == "" { return nil } return strings.Split(strings.ToLower(param), ",") } +func AddFiltersToURL(baseURL string, protocolFilter, addrFilter []string) string { + parsedURL, err := url.Parse(baseURL) + if err != nil { + return baseURL + } + + query := parsedURL.Query() + + if len(protocolFilter) > 0 { + query.Set("filter-protocols", strings.Join(protocolFilter, ",")) + } + + if len(addrFilter) > 0 { + query.Set("filter-addrs", strings.Join(addrFilter, ",")) + } + + // The comma is in the "sub-delims" set of characters that don't need to be + // encoded in most parts of a URL, including query parameters. 
The Go + // standard library percent-escapes it for consistency, but we prefer + // human-readable /routing/v1 URLs, so the real comma is restored here to + // ensure human and machine requests hit the same HTTP cache keys. + parsedURL.RawQuery = strings.ReplaceAll(query.Encode(), "%2C", ",") + + return parsedURL.String() +} + // applyFiltersToIter applies the filters to the given iterator and returns a new iterator. // // The function iterates over the input iterator, applying the specified filters to each record. @@ -28,7 +58,7 @@ func parseFilter(param string) []string { // - recordsIter: An iterator of types.Record to be filtered. // - filterAddrs: A slice of strings representing the address filter criteria. // - filterProtocols: A slice of strings representing the protocol filter criteria. -func applyFiltersToIter(recordsIter iter.ResultIter[types.Record], filterAddrs, filterProtocols []string) iter.ResultIter[types.Record] { +func ApplyFiltersToIter(recordsIter iter.ResultIter[types.Record], filterAddrs, filterProtocols []string) iter.ResultIter[types.Record] { mappedIter := iter.Map(recordsIter, func(v iter.Result[types.Record]) iter.Result[types.Record] { if v.Err != nil || v.Val == nil { return v @@ -76,6 +106,30 @@ func parseFilter(param string) []string { return filteredIter } +func ApplyFiltersToPeerRecordIter(peerRecordIter iter.ResultIter[*types.PeerRecord], filterAddrs, filterProtocols []string) iter.ResultIter[*types.PeerRecord] { + // Convert PeerRecord to Record so that we can reuse the filtering logic from findProviders + mappedIter := iter.Map(peerRecordIter, func(v iter.Result[*types.PeerRecord]) iter.Result[types.Record] { + if v.Err != nil || v.Val == nil { + return iter.Result[types.Record]{Err: v.Err} + } + + var record types.Record = v.Val + return iter.Result[types.Record]{Val: record} + }) + + filteredIter := ApplyFiltersToIter(mappedIter, filterAddrs, filterProtocols) + + // Convert Record back to PeerRecord 🙃 + return iter.Map(filteredIter, func(v iter.Result[types.Record]) iter.Result[*types.PeerRecord] { + if v.Err != nil || v.Val == nil { + return iter.Result[*types.PeerRecord]{Err: v.Err} + } + + var record *types.PeerRecord = v.Val.(*types.PeerRecord) + return iter.Result[*types.PeerRecord]{Val: record} + }) +} + // Applies the filters. Returns nil if the provider does not pass the protocols filter. // The address filter is more complicated because it potentially modifies the Addrs slice.
func applyFilters(provider *types.PeerRecord, filterAddrs, filterProtocols []string) *types.PeerRecord { diff --git a/routing/http/server/filters_test.go b/routing/http/filters/filters_test.go similarity index 82% rename from routing/http/server/filters_test.go rename to routing/http/filters/filters_test.go index 078e4aa96..d86316045 100644 --- a/routing/http/server/filters_test.go +++ b/routing/http/filters/filters_test.go @@ -1,4 +1,4 @@ -package server +package filters import ( "testing" @@ -10,6 +10,59 @@ import ( "github.com/stretchr/testify/require" ) +func TestAddFiltersToURL(t *testing.T) { + testCases := []struct { + name string + baseURL string + protocolFilter []string + addrFilter []string + expected string + }{ + { + name: "No filters", + baseURL: "https://example.com/routing/v1/providers/bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi", + protocolFilter: nil, + addrFilter: nil, + expected: "https://example.com/routing/v1/providers/bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi", + }, + { + name: "Only protocol filter", + baseURL: "https://example.com/routing/v1/providers/bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi", + protocolFilter: []string{"transport-bitswap", "transport-ipfs-gateway-http"}, + addrFilter: nil, + expected: "https://example.com/routing/v1/providers/bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi?filter-protocols=transport-bitswap,transport-ipfs-gateway-http", + }, + { + name: "Only addr filter", + baseURL: "https://example.com/routing/v1/providers/bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi", + protocolFilter: nil, + addrFilter: []string{"ip4", "ip6"}, + expected: "https://example.com/routing/v1/providers/bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi?filter-addrs=ip4,ip6", + }, + { + name: "Both filters", + baseURL: "https://example.com/routing/v1/providers/bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi", + protocolFilter: []string{"transport-bitswap", "transport-graphsync-filecoinv1"}, + addrFilter: []string{"ip4", "ip6"}, + expected: "https://example.com/routing/v1/providers/bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi?filter-addrs=ip4,ip6&filter-protocols=transport-bitswap,transport-graphsync-filecoinv1", + }, + { + name: "URL with existing query parameters", + baseURL: "https://example.com/routing/v1/providers/bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi?existing=param", + protocolFilter: []string{"transport-bitswap"}, + addrFilter: []string{"ip4"}, + expected: "https://example.com/routing/v1/providers/bafybeigdyrzt5sfp7udm7hu76uh7y26nf3efuylqabf3oclgtqy55fbzdi?existing=param&filter-addrs=ip4&filter-protocols=transport-bitswap", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := AddFiltersToURL(tc.baseURL, tc.protocolFilter, tc.addrFilter) + assert.Equal(t, tc.expected, result) + }) + } +} + func TestApplyAddrFilter(t *testing.T) { // Create some test multiaddrs addr1, _ := multiaddr.NewMultiaddr("/ip4/127.0.0.1/tcp/4001/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt") diff --git a/routing/http/server/server.go b/routing/http/server/server.go index 3cdcefee0..c951701c3 100644 --- a/routing/http/server/server.go +++ b/routing/http/server/server.go @@ -10,11 +10,13 @@ import ( "mime" "net/http" "strings" + "sync/atomic" "time" "github.com/cespare/xxhash/v2" "github.com/gorilla/mux" "github.com/ipfs/boxo/ipns" + "github.com/ipfs/boxo/routing/http/filters" 
"github.com/ipfs/boxo/routing/http/internal/drjson" "github.com/ipfs/boxo/routing/http/types" "github.com/ipfs/boxo/routing/http/types/iter" @@ -24,8 +26,12 @@ import ( "github.com/libp2p/go-libp2p/core/routing" "github.com/multiformats/go-multiaddr" "github.com/multiformats/go-multibase" + "github.com/prometheus/client_golang/prometheus" logging "github.com/ipfs/go-log/v2" + metrics "github.com/slok/go-http-metrics/metrics/prometheus" + "github.com/slok/go-http-metrics/middleware" + middlewarestd "github.com/slok/go-http-metrics/middleware/std" ) const ( @@ -36,6 +42,7 @@ const ( DefaultRecordsLimit = 20 DefaultStreamingRecordsLimit = 0 + DefaultRoutingTimeout = 30 * time.Second ) var logger = logging.Logger("routing/http/server") @@ -121,31 +128,75 @@ func WithStreamingRecordsLimit(limit int) Option { } } +func WithPrometheusRegistry(reg prometheus.Registerer) Option { + return func(s *server) { + s.promRegistry = reg + } +} + +func WithRoutingTimeout(timeout time.Duration) Option { + return func(s *server) { + s.routingTimeout = timeout + } +} + func Handler(svc ContentRouter, opts ...Option) http.Handler { server := &server{ svc: svc, recordsLimit: DefaultRecordsLimit, streamingRecordsLimit: DefaultStreamingRecordsLimit, + routingTimeout: DefaultRoutingTimeout, } for _, opt := range opts { opt(server) } + if server.promRegistry == nil { + server.promRegistry = prometheus.DefaultRegisterer + } + + // Workaround due to https://github.com/slok/go-http-metrics + // using egistry.MustRegister internally. + // In production there will be only one handler, however we append counter + // to ensure duplicate metric registration will not panic in parallel tests + // when global prometheus.DefaultRegisterer is used. + metricsPrefix := "delegated_routing_server" + c := handlerCount.Add(1) + if c > 1 { + metricsPrefix = fmt.Sprintf("%s_%d", metricsPrefix, c) + } + + // Create middleware with prometheus recorder + mdlw := middleware.New(middleware.Config{ + Recorder: metrics.NewRecorder(metrics.Config{ + Registry: server.promRegistry, + Prefix: metricsPrefix, + SizeBuckets: prometheus.ExponentialBuckets(100, 4, 8), // [100 400 1600 6400 25600 102400 409600 1.6384e+06] + DurationBuckets: []float64{0.1, 0.5, 1, 2, 5, 8, 10, 20, 30}, + }), + }) + r := mux.NewRouter() - r.HandleFunc(findProvidersPath, server.findProviders).Methods(http.MethodGet) - r.HandleFunc(providePath, server.provide).Methods(http.MethodPut) - r.HandleFunc(findPeersPath, server.findPeers).Methods(http.MethodGet) - r.HandleFunc(GetIPNSPath, server.GetIPNS).Methods(http.MethodGet) - r.HandleFunc(GetIPNSPath, server.PutIPNS).Methods(http.MethodPut) + // Wrap each handler with the metrics middleware + r.Handle(findProvidersPath, middlewarestd.Handler(findProvidersPath, mdlw, http.HandlerFunc(server.findProviders))).Methods(http.MethodGet) + r.Handle(providePath, middlewarestd.Handler(providePath, mdlw, http.HandlerFunc(server.provide))).Methods(http.MethodPut) + r.Handle(findPeersPath, middlewarestd.Handler(findPeersPath, mdlw, http.HandlerFunc(server.findPeers))).Methods(http.MethodGet) + r.Handle(GetIPNSPath, middlewarestd.Handler(GetIPNSPath, mdlw, http.HandlerFunc(server.GetIPNS))).Methods(http.MethodGet) + r.Handle(GetIPNSPath, middlewarestd.Handler(GetIPNSPath, mdlw, http.HandlerFunc(server.PutIPNS))).Methods(http.MethodPut) + return r } +var handlerCount atomic.Int32 + type server struct { svc ContentRouter disableNDJSON bool recordsLimit int streamingRecordsLimit int + promRegistry prometheus.Registerer + routingTimeout 
time.Duration } func (s *server) detectResponseType(r *http.Request) (string, error) { @@ -196,8 +247,8 @@ func (s *server) findProviders(w http.ResponseWriter, httpReq *http.Request) { // Parse query parameters query := httpReq.URL.Query() - filterAddrs := parseFilter(query.Get("filter-addrs")) - filterProtocols := parseFilter(query.Get("filter-protocols")) + filterAddrs := filters.ParseFilter(query.Get("filter-addrs")) + filterProtocols := filters.ParseFilter(query.Get("filter-protocols")) mediaType, err := s.detectResponseType(httpReq) if err != nil { @@ -218,7 +269,10 @@ func (s *server) findProviders(w http.ResponseWriter, httpReq *http.Request) { recordsLimit = s.recordsLimit } - provIter, err := s.svc.FindProviders(httpReq.Context(), cid, recordsLimit) + ctx, cancel := context.WithTimeout(httpReq.Context(), s.routingTimeout) + defer cancel() + + provIter, err := s.svc.FindProviders(ctx, cid, recordsLimit) if err != nil { if errors.Is(err, routing.ErrNotFound) { // handlerFunc takes care of setting the 404 and necessary headers @@ -235,7 +289,7 @@ func (s *server) findProviders(w http.ResponseWriter, httpReq *http.Request) { func (s *server) findProvidersJSON(w http.ResponseWriter, provIter iter.ResultIter[types.Record], filterAddrs, filterProtocols []string) { defer provIter.Close() - filteredIter := applyFiltersToIter(provIter, filterAddrs, filterProtocols) + filteredIter := filters.ApplyFiltersToIter(provIter, filterAddrs, filterProtocols) providers, err := iter.ReadAllResults(filteredIter) if err != nil { writeErr(w, "FindProviders", http.StatusInternalServerError, fmt.Errorf("delegate error: %w", err)) @@ -247,7 +301,7 @@ func (s *server) findProvidersJSON(w http.ResponseWriter, provIter iter.ResultIt }) } func (s *server) findProvidersNDJSON(w http.ResponseWriter, provIter iter.ResultIter[types.Record], filterAddrs, filterProtocols []string) { - filteredIter := applyFiltersToIter(provIter, filterAddrs, filterProtocols) + filteredIter := filters.ApplyFiltersToIter(provIter, filterAddrs, filterProtocols) writeResultsIterNDJSON(w, filteredIter) } @@ -285,8 +339,8 @@ func (s *server) findPeers(w http.ResponseWriter, r *http.Request) { } query := r.URL.Query() - filterAddrs := parseFilter(query.Get("filter-addrs")) - filterProtocols := parseFilter(query.Get("filter-protocols")) + filterAddrs := filters.ParseFilter(query.Get("filter-addrs")) + filterProtocols := filters.ParseFilter(query.Get("filter-protocols")) mediaType, err := s.detectResponseType(r) if err != nil { @@ -307,7 +361,11 @@ func (s *server) findPeers(w http.ResponseWriter, r *http.Request) { recordsLimit = s.recordsLimit } - provIter, err := s.svc.FindPeers(r.Context(), pid, recordsLimit) + // Add timeout to the routing operation + ctx, cancel := context.WithTimeout(r.Context(), s.routingTimeout) + defer cancel() + + provIter, err := s.svc.FindPeers(ctx, pid, recordsLimit) if err != nil { if errors.Is(err, routing.ErrNotFound) { // handlerFunc takes care of setting the 404 and necessary headers @@ -383,29 +441,9 @@ func (s *server) provide(w http.ResponseWriter, httpReq *http.Request) { func (s *server) findPeersJSON(w http.ResponseWriter, peersIter iter.ResultIter[*types.PeerRecord], filterAddrs, filterProtocols []string) { defer peersIter.Close() - // Convert PeerRecord to Record so that we can reuse the filtering logic from findProviders - mappedIter := iter.Map(peersIter, func(v iter.Result[*types.PeerRecord]) iter.Result[types.Record] { - if v.Err != nil || v.Val == nil { - return iter.Result[types.Record]{Err: 
v.Err} - } - - var record types.Record = v.Val - return iter.Result[types.Record]{Val: record} - }) + peersIter = filters.ApplyFiltersToPeerRecordIter(peersIter, filterAddrs, filterProtocols) - filteredIter := applyFiltersToIter(mappedIter, filterAddrs, filterProtocols) - - // Convert Record back to PeerRecord 🙃 - finalIter := iter.Map(filteredIter, func(v iter.Result[types.Record]) iter.Result[*types.PeerRecord] { - if v.Err != nil || v.Val == nil { - return iter.Result[*types.PeerRecord]{Err: v.Err} - } - - var record *types.PeerRecord = v.Val.(*types.PeerRecord) - return iter.Result[*types.PeerRecord]{Val: record} - }) - - peers, err := iter.ReadAllResults(finalIter) + peers, err := iter.ReadAllResults(peersIter) if err != nil { writeErr(w, "FindPeers", http.StatusInternalServerError, fmt.Errorf("delegate error: %w", err)) @@ -428,7 +466,7 @@ func (s *server) findPeersNDJSON(w http.ResponseWriter, peersIter iter.ResultIte return iter.Result[types.Record]{Val: record} }) - filteredIter := applyFiltersToIter(mappedIter, filterAddrs, filterProtocols) + filteredIter := filters.ApplyFiltersToIter(mappedIter, filterAddrs, filterProtocols) writeResultsIterNDJSON(w, filteredIter) } @@ -458,7 +496,10 @@ func (s *server) GetIPNS(w http.ResponseWriter, r *http.Request) { return } - record, err := s.svc.GetIPNS(r.Context(), name) + ctx, cancel := context.WithTimeout(r.Context(), s.routingTimeout) + defer cancel() + + record, err := s.svc.GetIPNS(ctx, name) if err != nil { if errors.Is(err, routing.ErrNotFound) { writeErr(w, "GetIPNS", http.StatusNotFound, fmt.Errorf("delegate error: %w", err)) @@ -542,7 +583,10 @@ func (s *server) PutIPNS(w http.ResponseWriter, r *http.Request) { return } - err = s.svc.PutIPNS(r.Context(), name, record) + ctx, cancel := context.WithTimeout(r.Context(), s.routingTimeout) + defer cancel() + + err = s.svc.PutIPNS(ctx, name, record) if err != nil { writeErr(w, "PutIPNS", http.StatusInternalServerError, fmt.Errorf("delegate error: %w", err)) return diff --git a/routing/http/server/server_test.go b/routing/http/server/server_test.go index 772f79999..bf84e4155 100644 --- a/routing/http/server/server_test.go +++ b/routing/http/server/server_test.go @@ -16,6 +16,7 @@ import ( "github.com/ipfs/boxo/ipns" "github.com/ipfs/boxo/path" + "github.com/ipfs/boxo/routing/http/filters" "github.com/ipfs/boxo/routing/http/types" "github.com/ipfs/boxo/routing/http/types/iter" "github.com/ipfs/go-cid" @@ -153,15 +154,7 @@ func TestProviders(t *testing.T) { router.On("FindProviders", mock.Anything, cid, limit).Return(results, nil) urlStr := fmt.Sprintf("%s/routing/v1/providers/%s", serverAddr, cidStr) - if filterAddrs != "" || filterProtocols != "" { - urlStr += "?" - if filterAddrs != "" { - urlStr = fmt.Sprintf("%s&filter-addrs=%s", urlStr, filterAddrs) - } - if filterProtocols != "" { - urlStr = fmt.Sprintf("%s&filter-protocols=%s", urlStr, filterProtocols) - } - } + urlStr = filters.AddFiltersToURL(urlStr, strings.Split(filterProtocols, ","), strings.Split(filterAddrs, ",")) req, err := http.NewRequest(http.MethodGet, urlStr, nil) require.NoError(t, err) @@ -273,15 +266,8 @@ func TestPeers(t *testing.T) { t.Cleanup(server.Close) urlStr := fmt.Sprintf("http://%s/routing/v1/peers/%s", server.Listener.Addr().String(), arg) - if filterAddrs != "" || filterProtocols != "" { - urlStr += "?" 
- if filterAddrs != "" { - urlStr = fmt.Sprintf("%s&filter-addrs=%s", urlStr, filterAddrs) - } - if filterProtocols != "" { - urlStr = fmt.Sprintf("%s&filter-protocols=%s", urlStr, filterProtocols) - } - } + urlStr = filters.AddFiltersToURL(urlStr, strings.Split(filterProtocols, ","), strings.Split(filterAddrs, ",")) + req, err := http.NewRequest(http.MethodGet, urlStr, nil) require.NoError(t, err) if contentType != "" { diff --git a/routing/mock/centralized_client.go b/routing/mock/centralized_client.go index 02c68d100..2c2135bb8 100644 --- a/routing/mock/centralized_client.go +++ b/routing/mock/centralized_client.go @@ -47,11 +47,12 @@ func (c *client) FindPeer(ctx context.Context, pid peer.ID) (peer.AddrInfo, erro } func (c *client) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.AddrInfo { + log.Debugf("FindProvidersAsync: %s %d", k, max) out := make(chan peer.AddrInfo) go func() { defer close(out) for i, p := range c.server.Providers(k) { - if max <= i { + if max > 0 && max <= i { return } select { diff --git a/routing/mock/centralized_server.go b/routing/mock/centralized_server.go index d55de7081..85c768814 100644 --- a/routing/mock/centralized_server.go +++ b/routing/mock/centralized_server.go @@ -39,7 +39,7 @@ func (rs *s) Announce(p peer.AddrInfo, c cid.Cid) error { rs.lock.Lock() defer rs.lock.Unlock() - k := c.KeyString() + k := c.Hash().String() _, ok := rs.providers[k] if !ok { @@ -54,16 +54,16 @@ func (rs *s) Announce(p peer.AddrInfo, c cid.Cid) error { func (rs *s) Providers(c cid.Cid) []peer.AddrInfo { rs.delayConf.Query.Wait() // before locking - rs.lock.RLock() defer rs.lock.RUnlock() - k := c.KeyString() + k := c.Hash().String() var ret []peer.AddrInfo records, ok := rs.providers[k] if !ok { return ret } + for _, r := range records { if time.Since(r.Created) > rs.delayConf.ValueVisibility.Get() { ret = append(ret, r.Peer) @@ -74,7 +74,6 @@ func (rs *s) Providers(c cid.Cid) []peer.AddrInfo { j := rand.Intn(i + 1) ret[i], ret[j] = ret[j], ret[i] } - return ret } diff --git a/routing/providerquerymanager/providerquerymanager.go b/routing/providerquerymanager/providerquerymanager.go new file mode 100644 index 000000000..592f7f814 --- /dev/null +++ b/routing/providerquerymanager/providerquerymanager.go @@ -0,0 +1,541 @@ +package providerquerymanager + +import ( + "context" + "sync" + "time" + + "github.com/gammazero/chanqueue" + "github.com/gammazero/deque" + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" + peer "github.com/libp2p/go-libp2p/core/peer" + swarm "github.com/libp2p/go-libp2p/p2p/net/swarm" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap/zapcore" +) + +var log = logging.Logger("routing/provqrymgr") + +const ( + defaultMaxInProcessRequests = 16 + defaultMaxProviders = 0 + defaultTimeout = 10 * time.Second +) + +type inProgressRequestStatus struct { + ctx context.Context + cancelFn func() + providersSoFar []peer.AddrInfo + listeners map[chan peer.AddrInfo]struct{} +} + +type findProviderRequest struct { + k cid.Cid + ctx context.Context +} + +// ProviderQueryDialer is an interface for connecting to peers. Usually a +// libp2p.Host +type ProviderQueryDialer interface { + Connect(context.Context, peer.AddrInfo) error +} + +// ProviderQueryRouter is an interface for finding providers. Usually a libp2p +// ContentRouter. 
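The two small interfaces around this point (`ProviderQueryDialer` above, `ProviderQueryRouter` defined next) are all this new package needs from libp2p. A hedged wiring sketch, where `host` (a libp2p host, which implements `Connect`) and `dht` (a content router implementing `FindProvidersAsync`) are assumed caller-side values:

```go
pqm, err := providerquerymanager.New(host, dht,
	providerquerymanager.WithMaxProviders(10),           // stop after 10 connectable providers
	providerquerymanager.WithMaxTimeout(30*time.Second), // per-query deadline
)
if err != nil {
	return err
}
defer pqm.Close()

// max == 0 falls back to the configured MaxProviders.
for p := range pqm.FindProvidersAsync(ctx, someCID, 0) {
	fmt.Println("connected provider:", p.ID)
}
```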
+type ProviderQueryRouter interface { + FindProvidersAsync(context.Context, cid.Cid, int) <-chan peer.AddrInfo +} + +type providerQueryMessage interface { + debugMessage() + handle(pqm *ProviderQueryManager) +} + +type receivedProviderMessage struct { + ctx context.Context + k cid.Cid + p peer.AddrInfo +} + +type finishedProviderQueryMessage struct { + ctx context.Context + k cid.Cid +} + +type newProvideQueryMessage struct { + ctx context.Context + k cid.Cid + inProgressRequestChan chan<- inProgressRequest +} + +type cancelRequestMessage struct { + ctx context.Context + incomingProviders chan peer.AddrInfo + k cid.Cid +} + +// ProviderQueryManager manages requests to find more providers for blocks +// for bitswap sessions. Its main goals are to: +// - rate limit requests -- don't have too many find provider calls running +// simultaneously +// - connect to found peers and filter them if it can't connect +// - ensure two findprovider calls for the same block don't run concurrently +// - manage timeouts +type ProviderQueryManager struct { + closeOnce sync.Once + closing chan struct{} + dialer ProviderQueryDialer + router ProviderQueryRouter + providerQueryMessages chan providerQueryMessage + providerRequestsProcessing *chanqueue.ChanQueue[*findProviderRequest] + + findProviderTimeout time.Duration + + maxProviders int + maxInProcessRequests int + + // do not touch outside the run loop + inProgressRequestStatuses map[cid.Cid]*inProgressRequestStatus +} + +type Option func(*ProviderQueryManager) error + +// WithMaxTimeout sets the limit on the amount of time to spend waiting for the +// maximum number of providers from a find request. +func WithMaxTimeout(timeout time.Duration) Option { + return func(mgr *ProviderQueryManager) error { + mgr.findProviderTimeout = timeout + return nil + } +} + +// WithMaxInProcessRequests sets the maximum number of requests that can be +// processed in parallel. If this is 0, then the number is unlimited. Default +// is defaultMaxInProcessRequests (16). +func WithMaxInProcessRequests(count int) Option { + return func(mgr *ProviderQueryManager) error { + mgr.maxInProcessRequests = count + return nil + } +} + +// WithMaxProviders sets the maximum number of providers that will be looked up +// per query. We only return providers that we can connect to. Defaults to 0, +// which means unbounded. +func WithMaxProviders(count int) Option { + return func(mgr *ProviderQueryManager) error { + mgr.maxProviders = count + return nil + } +} + +// New initializes a new ProviderQueryManager for the given dialer and content +// router. +func New(dialer ProviderQueryDialer, router ProviderQueryRouter, opts ...Option) (*ProviderQueryManager, error) { + pqm := &ProviderQueryManager{ + closing: make(chan struct{}), + dialer: dialer, + router: router, + providerQueryMessages: make(chan providerQueryMessage), + findProviderTimeout: defaultTimeout, + maxInProcessRequests: defaultMaxInProcessRequests, + maxProviders: defaultMaxProviders, + } + + for _, o := range opts { + if err := o(pqm); err != nil { + return nil, err + } + } + + go pqm.run() + + return pqm, nil +} + +func (pqm *ProviderQueryManager) Close() { + pqm.closeOnce.Do(func() { + close(pqm.closing) + }) +} + +type inProgressRequest struct { + providersSoFar []peer.AddrInfo + incoming chan peer.AddrInfo +} + +// FindProvidersAsync finds providers for the given block. The max parameter +// controls how many will be returned at most. For a provider to be returned, +// we must have successfully connected to it.
Setting max to 0 will use the +// configured MaxProviders, which defaults to 0 (unbounded). +func (pqm *ProviderQueryManager) FindProvidersAsync(sessionCtx context.Context, k cid.Cid, max int) <-chan peer.AddrInfo { + if max == 0 { + max = pqm.maxProviders + } + + inProgressRequestChan := make(chan inProgressRequest) + + var span trace.Span + sessionCtx, span = otel.Tracer("routing").Start(sessionCtx, "ProviderQueryManager.FindProvidersAsync", trace.WithAttributes(attribute.Stringer("cid", k))) + + select { + case pqm.providerQueryMessages <- &newProvideQueryMessage{ + ctx: sessionCtx, + k: k, + inProgressRequestChan: inProgressRequestChan, + }: + case <-pqm.closing: + ch := make(chan peer.AddrInfo) + close(ch) + span.End() + return ch + case <-sessionCtx.Done(): + ch := make(chan peer.AddrInfo) + close(ch) + return ch + } + + // DO NOT select on sessionCtx. We only want to abort here if we're + // shutting down because we can't actually _cancel_ the request till we + // get to receiveProviders. + var receivedInProgressRequest inProgressRequest + select { + case <-pqm.closing: + ch := make(chan peer.AddrInfo) + close(ch) + span.End() + return ch + case receivedInProgressRequest = <-inProgressRequestChan: + } + + return pqm.receiveProviders(sessionCtx, k, max, receivedInProgressRequest, func() { span.End() }) +} + +func (pqm *ProviderQueryManager) receiveProviders(sessionCtx context.Context, k cid.Cid, max int, receivedInProgressRequest inProgressRequest, onCloseFn func()) <-chan peer.AddrInfo { + // maintains an unbuffered queue of incoming providers for a given request + // for a given session. Essentially, as a provider comes in, for a given + // CID, immediately broadcast to all sessions that queried that CID, + // without worrying about whether the client code is actually reading from + // the returned channel -- so that the broadcast never blocks. + returnedProviders := make(chan peer.AddrInfo) + var receivedProviders deque.Deque[peer.AddrInfo] + receivedProviders.Grow(len(receivedInProgressRequest.providersSoFar)) + for _, addrInfo := range receivedInProgressRequest.providersSoFar { + receivedProviders.PushBack(addrInfo) + } + incomingProviders := receivedInProgressRequest.incoming + + // Count how many providers we received from our workers etc. + // These providers should be peers we managed to connect to. + total := receivedProviders.Len() + go func() { + defer close(returnedProviders) + defer onCloseFn() + outgoingProviders := func() chan<- peer.AddrInfo { + if receivedProviders.Len() == 0 { + return nil + } + return returnedProviders + } + nextProvider := func() peer.AddrInfo { + if receivedProviders.Len() == 0 { + return peer.AddrInfo{} + } + return receivedProviders.Front() + } + + stopWhenMaxReached := func() { + if max > 0 && total >= max { + if incomingProviders != nil { + // drains incomingProviders. + pqm.cancelProviderRequest(sessionCtx, k, incomingProviders) + incomingProviders = nil + } + } + } + + // Handle the case when providersSoFar already contains more than we + // need.
+ stopWhenMaxReached() + + for receivedProviders.Len() > 0 || incomingProviders != nil { + select { + case <-pqm.closing: + return + case <-sessionCtx.Done(): + if incomingProviders != nil { + pqm.cancelProviderRequest(sessionCtx, k, incomingProviders) + } + return + case provider, ok := <-incomingProviders: + if !ok { + incomingProviders = nil + } else { + receivedProviders.PushBack(provider) + total++ + stopWhenMaxReached() + // we do not return, we will loop on + // the case below until + // len(receivedProviders) == 0, which + // means they have all been sent out + // via returnedProviders + } + case outgoingProviders() <- nextProvider(): + receivedProviders.PopFront() + } + } + }() + return returnedProviders +} + +func (pqm *ProviderQueryManager) cancelProviderRequest(ctx context.Context, k cid.Cid, incomingProviders chan peer.AddrInfo) { + cancelMessageChannel := pqm.providerQueryMessages + for { + select { + case cancelMessageChannel <- &cancelRequestMessage{ + ctx: ctx, + incomingProviders: incomingProviders, + k: k, + }: + cancelMessageChannel = nil + // clear out any remaining providers, in case any "incoming provider" + // messages get processed before our cancel message + case _, ok := <-incomingProviders: + if !ok { + return + } + case <-pqm.closing: + return + } + } +} + +// findProviderWorker cycles through incoming provider queries one at a time. +func (pqm *ProviderQueryManager) findProviderWorker() { + var findSem chan struct{} + // If limiting the number of concurrent requests, create a counting + // semaphore to enforce this limit. + if pqm.maxInProcessRequests > 0 { + findSem = make(chan struct{}, pqm.maxInProcessRequests) + } + + // Read find provider requests until channel is closed. The channel is + // closed as soon as pqm.Close is called, so there is no need to select on + // any other channel to detect shutdown. + for fpr := range pqm.providerRequestsProcessing.Out() { + if findSem != nil { + select { + case findSem <- struct{}{}: + case <-pqm.closing: + return + } + } + + go func(ctx context.Context, k cid.Cid) { + if findSem != nil { + defer func() { + <-findSem + }() + } + + log.Debugf("Beginning Find Provider Request for cid: %s", k.String()) + findProviderCtx, cancel := context.WithTimeout(ctx, pqm.findProviderTimeout) + span := trace.SpanFromContext(findProviderCtx) + span.AddEvent("StartFindProvidersAsync") + // We set count == 0. We will cancel the query manually once we + // have enough. This assumes the ContentDiscovery + // implementation does that, which is a requirement per the + // libp2p/core/routing interface.
+ providers := pqm.router.FindProvidersAsync(findProviderCtx, k, 0) + wg := &sync.WaitGroup{} + for p := range providers { + wg.Add(1) + go func(p peer.AddrInfo) { + defer wg.Done() + span.AddEvent("FoundProvider", trace.WithAttributes(attribute.Stringer("peer", p.ID))) + err := pqm.dialer.Connect(findProviderCtx, p) + if err != nil && err != swarm.ErrDialToSelf { + span.RecordError(err, trace.WithAttributes(attribute.Stringer("peer", p.ID))) + log.Debugf("failed to connect to provider %s: %s", p.ID, err) + return + } + span.AddEvent("ConnectedToProvider", trace.WithAttributes(attribute.Stringer("peer", p.ID))) + select { + case pqm.providerQueryMessages <- &receivedProviderMessage{ + ctx: ctx, + k: k, + p: p, + }: + case <-pqm.closing: + return + } + }(p) + } + wg.Wait() + cancel() + select { + case pqm.providerQueryMessages <- &finishedProviderQueryMessage{ + ctx: ctx, + k: k, + }: + case <-pqm.closing: + } + }(fpr.ctx, fpr.k) + } +} + +func (pqm *ProviderQueryManager) cleanupInProcessRequests() { + for _, requestStatus := range pqm.inProgressRequestStatuses { + for listener := range requestStatus.listeners { + close(listener) + } + requestStatus.cancelFn() + } +} + +func (pqm *ProviderQueryManager) run() { + defer pqm.cleanupInProcessRequests() + + pqm.providerRequestsProcessing = chanqueue.New[*findProviderRequest]() + defer pqm.providerRequestsProcessing.Shutdown() + + go pqm.findProviderWorker() + + for { + select { + case nextMessage := <-pqm.providerQueryMessages: + nextMessage.debugMessage() + nextMessage.handle(pqm) + case <-pqm.closing: + return + } + } +} + +func (rpm *receivedProviderMessage) debugMessage() { + log.Debugf("Received provider (%s) (%s)", rpm.p, rpm.k) + trace.SpanFromContext(rpm.ctx).AddEvent("ReceivedProvider", trace.WithAttributes(attribute.Stringer("provider", rpm.p), attribute.Stringer("cid", rpm.k))) +} + +func (rpm *receivedProviderMessage) handle(pqm *ProviderQueryManager) { + requestStatus, ok := pqm.inProgressRequestStatuses[rpm.k] + if !ok { + log.Debugf("Received provider (%s) for cid (%s) not requested", rpm.p.String(), rpm.k.String()) + return + } + requestStatus.providersSoFar = append(requestStatus.providersSoFar, rpm.p) + for listener := range requestStatus.listeners { + select { + case listener <- rpm.p: + case <-pqm.closing: + return + } + } +} + +func (fpqm *finishedProviderQueryMessage) debugMessage() { + log.Debugf("Finished Provider Query on cid: %s", fpqm.k) + trace.SpanFromContext(fpqm.ctx).AddEvent("FinishedProviderQuery", trace.WithAttributes(attribute.Stringer("cid", fpqm.k))) +} + +func (fpqm *finishedProviderQueryMessage) handle(pqm *ProviderQueryManager) { + requestStatus, ok := pqm.inProgressRequestStatuses[fpqm.k] + if !ok { + // we canceled the request as it finished. 
+ return + } + for listener := range requestStatus.listeners { + close(listener) + } + delete(pqm.inProgressRequestStatuses, fpqm.k) + if len(pqm.inProgressRequestStatuses) == 0 { + pqm.inProgressRequestStatuses = nil + } + requestStatus.cancelFn() +} + +func (npqm *newProvideQueryMessage) debugMessage() { + log.Debugf("New Provider Query on cid: %s", npqm.k) + trace.SpanFromContext(npqm.ctx).AddEvent("NewProvideQuery", trace.WithAttributes(attribute.Stringer("cid", npqm.k))) +} + +func (npqm *newProvideQueryMessage) handle(pqm *ProviderQueryManager) { + requestStatus, ok := pqm.inProgressRequestStatuses[npqm.k] + if !ok { + ctx, cancelFn := context.WithCancel(context.Background()) + span := trace.SpanFromContext(npqm.ctx) + span.AddEvent("NewQuery", trace.WithAttributes(attribute.Stringer("cid", npqm.k))) + ctx = trace.ContextWithSpan(ctx, span) + + // Use context derived from background here, and not the context from the + // request (npqm.ctx), because this inProgressRequestStatus applies to + // all in-progress requests for the CID (npqm.k). + // + // For tracing, this means that only the span from the first + // request-in-progress for a CID is used, even if there are multiple + // requests for the same CID. + requestStatus = &inProgressRequestStatus{ + listeners: make(map[chan peer.AddrInfo]struct{}), + ctx: ctx, + cancelFn: cancelFn, + } + + if pqm.inProgressRequestStatuses == nil { + pqm.inProgressRequestStatuses = make(map[cid.Cid]*inProgressRequestStatus) + } + pqm.inProgressRequestStatuses[npqm.k] = requestStatus + + select { + case pqm.providerRequestsProcessing.In() <- &findProviderRequest{ + k: npqm.k, + ctx: ctx, + }: + case <-pqm.closing: + return + } + } else { + trace.SpanFromContext(npqm.ctx).AddEvent("JoinQuery", trace.WithAttributes(attribute.Stringer("cid", npqm.k))) + if log.Level().Enabled(zapcore.DebugLevel) { + log.Debugf("Joined existing query for cid %s which now has %d queries in progress", npqm.k, len(requestStatus.listeners)+1) + } + } + inProgressChan := make(chan peer.AddrInfo) + requestStatus.listeners[inProgressChan] = struct{}{} + select { + case npqm.inProgressRequestChan <- inProgressRequest{ + providersSoFar: requestStatus.providersSoFar, + incoming: inProgressChan, + }: + case <-pqm.closing: + } +} + +func (crm *cancelRequestMessage) debugMessage() { + log.Debugf("Cancel provider query on cid: %s", crm.k) + trace.SpanFromContext(crm.ctx).AddEvent("CancelRequest", trace.WithAttributes(attribute.Stringer("cid", crm.k))) +} + +func (crm *cancelRequestMessage) handle(pqm *ProviderQueryManager) { + requestStatus, ok := pqm.inProgressRequestStatuses[crm.k] + if !ok { + // Request finished while queued. + return + } + _, ok = requestStatus.listeners[crm.incomingProviders] + if !ok { + // Request finished and _restarted_ while queued. 
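+		// Our incoming channel was already closed when the original
+		// query completed, so it must not be closed a second time.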
+ return + } + delete(requestStatus.listeners, crm.incomingProviders) + close(crm.incomingProviders) + if len(requestStatus.listeners) == 0 { + delete(pqm.inProgressRequestStatuses, crm.k) + if len(pqm.inProgressRequestStatuses) == 0 { + pqm.inProgressRequestStatuses = nil + } + requestStatus.cancelFn() + } +} diff --git a/bitswap/client/internal/providerquerymanager/providerquerymanager_test.go b/routing/providerquerymanager/providerquerymanager_test.go similarity index 58% rename from bitswap/client/internal/providerquerymanager/providerquerymanager_test.go rename to routing/providerquerymanager/providerquerymanager_test.go index 9deb77f99..8026c5364 100644 --- a/bitswap/client/internal/providerquerymanager/providerquerymanager_test.go +++ b/routing/providerquerymanager/providerquerymanager_test.go @@ -13,27 +13,30 @@ import ( "github.com/libp2p/go-libp2p/core/peer" ) -type fakeProviderNetwork struct { +type fakeProviderDialer struct { + connectError error + connectDelay time.Duration +} + +type fakeProviderDiscovery struct { peersFound []peer.ID - connectError error delay time.Duration - connectDelay time.Duration queriesMadeMutex sync.RWMutex queriesMade int liveQueries int } -func (fpn *fakeProviderNetwork) ConnectTo(context.Context, peer.ID) error { - time.Sleep(fpn.connectDelay) - return fpn.connectError +func (fpd *fakeProviderDialer) Connect(context.Context, peer.AddrInfo) error { + time.Sleep(fpd.connectDelay) + return fpd.connectError } -func (fpn *fakeProviderNetwork) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.ID { +func (fpn *fakeProviderDiscovery) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.AddrInfo { fpn.queriesMadeMutex.Lock() fpn.queriesMade++ fpn.liveQueries++ fpn.queriesMadeMutex.Unlock() - incomingPeers := make(chan peer.ID) + incomingPeers := make(chan peer.AddrInfo) go func() { defer close(incomingPeers) for _, p := range fpn.peersFound { @@ -44,7 +47,7 @@ func (fpn *fakeProviderNetwork) FindProvidersAsync(ctx context.Context, k cid.Ci default: } select { - case incomingPeers <- p: + case incomingPeers <- peer.AddrInfo{ID: p}: case <-ctx.Done(): return } @@ -57,28 +60,35 @@ func (fpn *fakeProviderNetwork) FindProvidersAsync(ctx context.Context, k cid.Ci return incomingPeers } +func mustNotErr[T any](out T, err error) T { + if err != nil { + panic(err) + } + return out +} + func TestNormalSimultaneousFetch(t *testing.T) { peers := random.Peers(10) - fpn := &fakeProviderNetwork{ + fpd := &fakeProviderDialer{} + fpn := &fakeProviderDiscovery{ peersFound: peers, delay: 1 * time.Millisecond, } - ctx := context.Background() - providerQueryManager := New(ctx, fpn) - providerQueryManager.Startup() + providerQueryManager := mustNotErr(New(fpd, fpn)) + defer providerQueryManager.Close() keys := random.Cids(2) - sessionCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + sessionCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[0]) - secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[1]) + firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[0], 0) + secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[1], 0) - var firstPeersReceived []peer.ID + var firstPeersReceived []peer.AddrInfo for p := range firstRequestChan { firstPeersReceived = append(firstPeersReceived, p) } - var secondPeersReceived []peer.ID + var secondPeersReceived 
[]peer.AddrInfo for p := range secondRequestChan { secondPeersReceived = append(secondPeersReceived, p) } @@ -96,26 +106,26 @@ func TestNormalSimultaneousFetch(t *testing.T) { func TestDedupingProviderRequests(t *testing.T) { peers := random.Peers(10) - fpn := &fakeProviderNetwork{ + fpd := &fakeProviderDialer{} + fpn := &fakeProviderDiscovery{ peersFound: peers, delay: 1 * time.Millisecond, } - ctx := context.Background() - providerQueryManager := New(ctx, fpn) - providerQueryManager.Startup() + providerQueryManager := mustNotErr(New(fpd, fpn)) + defer providerQueryManager.Close() key := random.Cids(1)[0] - sessionCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + sessionCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key) - secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key) + firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key, 0) + secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key, 0) - var firstPeersReceived []peer.ID + var firstPeersReceived []peer.AddrInfo for p := range firstRequestChan { firstPeersReceived = append(firstPeersReceived, p) } - var secondPeersReceived []peer.ID + var secondPeersReceived []peer.AddrInfo for p := range secondRequestChan { secondPeersReceived = append(secondPeersReceived, p) } @@ -136,30 +146,31 @@ func TestDedupingProviderRequests(t *testing.T) { func TestCancelOneRequestDoesNotTerminateAnother(t *testing.T) { peers := random.Peers(10) - fpn := &fakeProviderNetwork{ + fpd := &fakeProviderDialer{} + fpn := &fakeProviderDiscovery{ peersFound: peers, delay: 1 * time.Millisecond, } - ctx := context.Background() - providerQueryManager := New(ctx, fpn) - providerQueryManager.Startup() + providerQueryManager := mustNotErr(New(fpd, fpn)) + defer providerQueryManager.Close() key := random.Cids(1)[0] // first session will cancel before done + ctx := context.Background() firstSessionCtx, firstCancel := context.WithTimeout(ctx, 3*time.Millisecond) defer firstCancel() - firstRequestChan := providerQueryManager.FindProvidersAsync(firstSessionCtx, key) + firstRequestChan := providerQueryManager.FindProvidersAsync(firstSessionCtx, key, 0) secondSessionCtx, secondCancel := context.WithTimeout(ctx, 5*time.Second) defer secondCancel() - secondRequestChan := providerQueryManager.FindProvidersAsync(secondSessionCtx, key) + secondRequestChan := providerQueryManager.FindProvidersAsync(secondSessionCtx, key, 0) - var firstPeersReceived []peer.ID + var firstPeersReceived []peer.AddrInfo for p := range firstRequestChan { firstPeersReceived = append(firstPeersReceived, p) } - var secondPeersReceived []peer.ID + var secondPeersReceived []peer.AddrInfo for p := range secondRequestChan { secondPeersReceived = append(secondPeersReceived, p) } @@ -180,29 +191,28 @@ func TestCancelOneRequestDoesNotTerminateAnother(t *testing.T) { func TestCancelManagerExitsGracefully(t *testing.T) { peers := random.Peers(10) - fpn := &fakeProviderNetwork{ + fpd := &fakeProviderDialer{} + fpn := &fakeProviderDiscovery{ peersFound: peers, delay: 1 * time.Millisecond, } - ctx := context.Background() - managerCtx, managerCancel := context.WithTimeout(ctx, 5*time.Millisecond) - defer managerCancel() - providerQueryManager := New(managerCtx, fpn) - providerQueryManager.Startup() + providerQueryManager := mustNotErr(New(fpd, fpn)) + defer providerQueryManager.Close() + time.AfterFunc(5*time.Millisecond, 
providerQueryManager.Close) key := random.Cids(1)[0] - sessionCtx, cancel := context.WithTimeout(ctx, 20*time.Millisecond) + sessionCtx, cancel := context.WithTimeout(context.Background(), 20*time.Millisecond) defer cancel() - firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key) - secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key) + firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key, 0) + secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key, 0) - var firstPeersReceived []peer.ID + var firstPeersReceived []peer.AddrInfo for p := range firstRequestChan { firstPeersReceived = append(firstPeersReceived, p) } - var secondPeersReceived []peer.ID + var secondPeersReceived []peer.AddrInfo for p := range secondRequestChan { secondPeersReceived = append(secondPeersReceived, p) } @@ -215,28 +225,29 @@ func TestCancelManagerExitsGracefully(t *testing.T) { func TestPeersWithConnectionErrorsNotAddedToPeerList(t *testing.T) { peers := random.Peers(10) - fpn := &fakeProviderNetwork{ - peersFound: peers, + fpd := &fakeProviderDialer{ connectError: errors.New("not able to connect"), - delay: 1 * time.Millisecond, } - ctx := context.Background() - providerQueryManager := New(ctx, fpn) - providerQueryManager.Startup() + fpn := &fakeProviderDiscovery{ + peersFound: peers, + delay: 1 * time.Millisecond, + } + providerQueryManager := mustNotErr(New(fpd, fpn)) + defer providerQueryManager.Close() key := random.Cids(1)[0] - sessionCtx, cancel := context.WithTimeout(ctx, 20*time.Millisecond) + sessionCtx, cancel := context.WithTimeout(context.Background(), 20*time.Millisecond) defer cancel() - firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key) - secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key) + firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key, 0) + secondRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, key, 0) - var firstPeersReceived []peer.ID + var firstPeersReceived []peer.AddrInfo for p := range firstRequestChan { firstPeersReceived = append(firstPeersReceived, p) } - var secondPeersReceived []peer.ID + var secondPeersReceived []peer.AddrInfo for p := range secondRequestChan { secondPeersReceived = append(secondPeersReceived, p) } @@ -247,23 +258,23 @@ func TestPeersWithConnectionErrorsNotAddedToPeerList(t *testing.T) { } func TestRateLimitingRequests(t *testing.T) { + const maxInProcessRequests = 6 + peers := random.Peers(10) - fpn := &fakeProviderNetwork{ + fpd := &fakeProviderDialer{} + fpn := &fakeProviderDiscovery{ peersFound: peers, delay: 5 * time.Millisecond, } - ctx := context.Background() - ctx, cancel := context.WithCancel(ctx) - defer cancel() - providerQueryManager := New(ctx, fpn) - providerQueryManager.Startup() + providerQueryManager := mustNotErr(New(fpd, fpn, WithMaxInProcessRequests(maxInProcessRequests))) + defer providerQueryManager.Close() keys := random.Cids(maxInProcessRequests + 1) - sessionCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + sessionCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - var requestChannels []<-chan peer.ID + var requestChannels []<-chan peer.AddrInfo for i := 0; i < maxInProcessRequests+1; i++ { - requestChannels = append(requestChannels, providerQueryManager.FindProvidersAsync(sessionCtx, keys[i])) + requestChannels = append(requestChannels, providerQueryManager.FindProvidersAsync(sessionCtx, keys[i], 0)) } 
time.Sleep(20 * time.Millisecond) fpn.queriesMadeMutex.Lock() @@ -285,22 +296,62 @@ func TestRateLimitingRequests(t *testing.T) { } } +func TestUnlimitedRequests(t *testing.T) { + const inProcessRequests = 11 + + peers := random.Peers(10) + fpd := &fakeProviderDialer{} + fpn := &fakeProviderDiscovery{ + peersFound: peers, + delay: 5 * time.Millisecond, + } + providerQueryManager := mustNotErr(New(fpd, fpn, WithMaxInProcessRequests(0))) + defer providerQueryManager.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + keys := random.Cids(inProcessRequests) + sessionCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + var requestChannels []<-chan peer.AddrInfo + for i := 0; i < inProcessRequests; i++ { + requestChannels = append(requestChannels, providerQueryManager.FindProvidersAsync(sessionCtx, keys[i], 0)) + } + time.Sleep(20 * time.Millisecond) + fpn.queriesMadeMutex.Lock() + if fpn.liveQueries != inProcessRequests { + t.Logf("Queries made: %d\n", fpn.liveQueries) + t.Fatal("Parallel requests appear to be rate limited") + } + fpn.queriesMadeMutex.Unlock() + for i := 0; i < inProcessRequests; i++ { + for range requestChannels[i] { + } + } + + fpn.queriesMadeMutex.Lock() + defer fpn.queriesMadeMutex.Unlock() + if fpn.queriesMade != inProcessRequests { + t.Logf("Queries made: %d\n", fpn.queriesMade) + t.Fatal("Did not make all separate requests") + } +} + func TestFindProviderTimeout(t *testing.T) { peers := random.Peers(10) - fpn := &fakeProviderNetwork{ + fpd := &fakeProviderDialer{} + fpn := &fakeProviderDiscovery{ peersFound: peers, delay: 10 * time.Millisecond, } - ctx := context.Background() - providerQueryManager := New(ctx, fpn) - providerQueryManager.Startup() - providerQueryManager.SetFindProviderTimeout(2 * time.Millisecond) + providerQueryManager := mustNotErr(New(fpd, fpn, WithMaxTimeout(2*time.Millisecond))) + defer providerQueryManager.Close() keys := random.Cids(1) - sessionCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + sessionCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[0]) - var firstPeersReceived []peer.ID + firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[0], 0) + var firstPeersReceived []peer.AddrInfo for p := range firstRequestChan { firstPeersReceived = append(firstPeersReceived, p) } @@ -311,19 +362,18 @@ func TestFindProviderTimeout(t *testing.T) { func TestFindProviderPreCanceled(t *testing.T) { peers := random.Peers(10) - fpn := &fakeProviderNetwork{ + fpd := &fakeProviderDialer{} + fpn := &fakeProviderDiscovery{ peersFound: peers, delay: 1 * time.Millisecond, } - ctx := context.Background() - providerQueryManager := New(ctx, fpn) - providerQueryManager.Startup() - providerQueryManager.SetFindProviderTimeout(100 * time.Millisecond) + providerQueryManager := mustNotErr(New(fpd, fpn, WithMaxTimeout(100*time.Millisecond))) + defer providerQueryManager.Close() keys := random.Cids(1) - sessionCtx, cancel := context.WithCancel(ctx) + sessionCtx, cancel := context.WithCancel(context.Background()) cancel() - firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[0]) + firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[0], 0) if firstRequestChan == nil { t.Fatal("expected non-nil channel") } @@ -336,18 +386,17 @@ func TestFindProviderPreCanceled(t *testing.T) { func TestCancelFindProvidersAfterCompletion(t 
*testing.T) {
 	peers := random.Peers(2)
-	fpn := &fakeProviderNetwork{
+	fpd := &fakeProviderDialer{}
+	fpn := &fakeProviderDiscovery{
 		peersFound: peers,
 		delay:      1 * time.Millisecond,
 	}
-	ctx := context.Background()
-	providerQueryManager := New(ctx, fpn)
-	providerQueryManager.Startup()
-	providerQueryManager.SetFindProviderTimeout(100 * time.Millisecond)
+	providerQueryManager := mustNotErr(New(fpd, fpn, WithMaxTimeout(100*time.Millisecond)))
+	defer providerQueryManager.Close()
 	keys := random.Cids(1)

-	sessionCtx, cancel := context.WithCancel(ctx)
-	firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[0])
+	sessionCtx, cancel := context.WithCancel(context.Background())
+	firstRequestChan := providerQueryManager.FindProvidersAsync(sessionCtx, keys[0], 0)
 	<-firstRequestChan                // wait for everything to start.
 	time.Sleep(10 * time.Millisecond) // wait for the incoming providers to stop.
 	cancel()                          // cancel the context.
@@ -365,3 +414,25 @@ func TestCancelFindProvidersAfterCompletion(t *testing.T) {
 		}
 	}
 }
+
+func TestLimitedProviders(t *testing.T) {
+	max := 5
+	peers := random.Peers(10)
+	fpd := &fakeProviderDialer{}
+	fpn := &fakeProviderDiscovery{
+		peersFound: peers,
+		delay:      1 * time.Millisecond,
+	}
+	providerQueryManager := mustNotErr(New(fpd, fpn, WithMaxProviders(max), WithMaxTimeout(100*time.Millisecond)))
+	defer providerQueryManager.Close()
+	keys := random.Cids(1)
+
+	providersChan := providerQueryManager.FindProvidersAsync(context.Background(), keys[0], 0)
+	total := 0
+	for range providersChan {
+		total++
+	}
+	if total != max {
+		t.Fatalf("expected exactly %d providers, got %d", max, total)
+	}
+}
diff --git a/tracing/exporters.go b/tracing/exporters.go
index 6c57f8456..71a564c7c 100644
--- a/tracing/exporters.go
+++ b/tracing/exporters.go
@@ -20,7 +20,7 @@ import (
 // most of this code.
 //
 // Specs:
-// - https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md#exporter-selection
+// - https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/configuration/sdk-environment-variables.md#exporter-selection
 // - https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md
 func NewSpanExporters(ctx context.Context) ([]trace.SpanExporter, error) {
 	var exporters []trace.SpanExporter
diff --git a/version.json b/version.json
index 93d6ca712..0bab82458 100644
--- a/version.json
+++ b/version.json
@@ -1,3 +1,3 @@
 {
-  "version": "v0.23.0"
+  "version": "v0.25.0"
 }
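For reviewers, a minimal sketch (not part of the diff) of how the relocated `routing/providerquerymanager` package is consumed after this change, based on the constructor and options exercised by the tests above. It assumes a libp2p `host.Host` serves as the dialer (its `Connect` method matches the required signature) and any `routing.ContentRouting` implementation serves as discovery; `findProviders` and its argument names are illustrative only:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/ipfs/boxo/routing/providerquerymanager"
	"github.com/ipfs/go-cid"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/routing"
)

// findProviders sketches the new call pattern: construct the manager once
// with a dialer and a discovery source, then issue deduplicated queries.
func findProviders(ctx context.Context, h host.Host, router routing.ContentRouting, c cid.Cid) error {
	pqm, err := providerquerymanager.New(h, router,
		providerquerymanager.WithMaxProviders(10),
		providerquerymanager.WithMaxTimeout(10*time.Second),
	)
	if err != nil {
		return err
	}
	defer pqm.Close()

	// Passing max == 0 defers to the WithMaxProviders limit set above.
	for p := range pqm.FindProvidersAsync(ctx, c, 0) {
		fmt.Println("provider:", p.ID)
	}
	return nil
}
```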