diff --git a/.github/workflows/gateway-conformance.yml b/.github/workflows/gateway-conformance.yml index a2a3734f6..f45e9aedf 100644 --- a/.github/workflows/gateway-conformance.yml +++ b/.github/workflows/gateway-conformance.yml @@ -16,7 +16,7 @@ jobs: steps: # 1. Download the gateway-conformance fixtures - name: Download gateway-conformance fixtures - uses: ipfs/gateway-conformance/.github/actions/extract-fixtures@v0.3 + uses: ipfs/gateway-conformance/.github/actions/extract-fixtures@v0.4 with: output: fixtures merged: true @@ -40,7 +40,7 @@ jobs: # 4. Run the gateway-conformance tests - name: Run gateway-conformance tests - uses: ipfs/gateway-conformance/.github/actions/test@v0.3 + uses: ipfs/gateway-conformance/.github/actions/test@v0.4 with: gateway-url: http://127.0.0.1:8040 json: output.json diff --git a/CHANGELOG.md b/CHANGELOG.md index d3f4979fa..f7cc4329f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,87 @@ The following emojis are used to highlight certain changes: ### Removed +### Security + +## [v0.16.0] + +### Changed + +* 🛠 `boxo/namesys`: now fails when multiple valid DNSLink entries are found for the same domain. This used to cause undefined behavior before. Now, we return an error, according to the [specification](https://dnslink.dev/). + +### Removed + +* 🛠 `boxo/gateway`: removed support for undocumented legacy `ipfs-404.html`. Use [`_redirects`](https://specs.ipfs.tech/http-gateways/web-redirects-file/) instead. +* 🛠 `boxo/namesys`: removed support for legacy DNSLink entries at the root of the domain. Use [`_dnslink.` TXT record](https://docs.ipfs.tech/concepts/dnslink/) instead. +* 🛠 `boxo/coreapi`, an intrinsic part of Kubo, has been removed and moved to `kubo/core/coreiface`. + +### Fixed + +* `boxo/gateway` + * a panic (which is recovered) could sporadically be triggered inside a CAR request, if the right [conditions were met](https://github.com/ipfs/boxo/pull/511). + * no longer emits `http: superfluous response.WriteHeader` warnings when an error happens. + +## [v0.15.0] + +### Changed + +* 🛠 Bumped to [`go-libp2p` 0.32](https://github.com/libp2p/go-libp2p/releases/tag/v0.32.0). + +## [v0.14.0] + +### Added + +* `boxo/gateway`: + * A new `WithResolver(...)` option can be used with `NewBlocksBackend(...)` allowing the user to pass their custom `Resolver` implementation. + * The gateway now sets a `Cache-Control` header for requests under the `/ipns/` namespace if the TTL for the corresponding IPNS Records or DNSLink entities is known. +* `boxo/bitswap/client`: + * A new `WithoutDuplicatedBlockStats()` option can be used with `bitswap.New` and `bsclient.New`. This disable accounting for duplicated blocks, which requires a `blockstore.Has()` lookup for every received block and thus, can impact performance. +* ✨ Migrated repositories into Boxo + * [`github.com/ipfs/kubo/peering`](https://pkg.go.dev/github.com/ipfs/kubo/peering) => [`./peering`](./peering) + A service which establish, overwatch and maintain long lived connections. + * [`github.com/ipfs/kubo/core/bootstrap`](https://pkg.go.dev/github.com/ipfs/kubo/core/bootstrap) => [`./bootstrap](./bootstrap) + A service that maintains connections to a number of bootstrap peers. + +### Changed + +* `boxo/gateway` + * 🛠 The `IPFSBackend` interface was updated to make the responses of the + `Head` method more explicit. It now returns a `HeadResponse` instead of a + `files.Node`. +* `boxo/routing/http/client.Client` is now exported. 
This means you can now pass + it around functions, or add it to a struct if you want. +* 🛠 The `path` package has been massively refactored. With this refactor, we have + condensed the different path-related and/or Kubo-specific packages under a single generic one. Therefore, there + are many breaking changes. Please consult the [documentation](https://pkg.go.dev/github.com/ipfs/boxo/path) + for more details on how to use the new package. + * Note: content paths created with `boxo/path` are automatically normalized: + - Replace multiple slashes with a single slash. + - Eliminate each `.` path name element (the current directory). + - Eliminate each inner `..` path name element (the parent directory) along with the non-`..` element that precedes it. + - Eliminate `..` elements that begin a rooted path: that is, replace "`/..`" by "`/`" at the beginning of a path. +* 🛠 The signature of `CoreAPI.ResolvePath` in `coreiface` has changed to now return + the remainder segments as a second return value, matching the signature of `resolver.ResolveToLastNode`. +* 🛠 `routing/http/client.FindPeers` now returns `iter.ResultIter[types.PeerRecord]` instead of `iter.ResultIter[types.Record]`. The specification indicates that records for this method will always be Peer Records. +* 🛠 The `namesys` package has been refactored. The following are the largest modifications: + * The options in `coreiface/options/namesys` have been moved to `namesys` and their names + have been made more consistent. + * Many of the exported structs and functions have been renamed in order to be consistent with + the remaining packages. + * `namesys.Resolver.Resolve` now returns a TTL, in addition to the resolved path. If the + TTL is unknown, 0 is returned. `IPNSResolver` is able to resolve a TTL, while `DNSResolver` + is not. + * `namesys/resolver.ResolveIPNS` has been moved to `namesys.ResolveIPNS` and now returns a TTL + in addition to the resolved path. +* ✨ `boxo/ipns` record defaults follow recommendations from [IPNS Record Specification](https://specs.ipfs.tech/ipns/ipns-record/#ipns-record): + * `DefaultRecordTTL` is now set to `1h` + * `DefaultRecordLifetime` follows the increased expiration window of Amino DHT ([go-libp2p-kad-dht#793](https://github.com/libp2p/go-libp2p-kad-dht/pull/793)) and is set to `48h` +* 🛠 The `gateway`'s `IPFSBackend.ResolveMutable` is now expected to return a TTL in addition to + the resolved path. If the TTL is unknown, 0 should be returned. + +### Removed + +* 🛠 `util.MultiErr` has been removed. Please use Go's native support for wrapping errors, or `errors.Join` instead. + ### Fixed ### Security @@ -243,7 +324,7 @@ None. - `InternalKeys` - 🛠 `provider/batched.New` has been moved to `provider.New` and arguments has been changed. (https://github.com/ipfs/boxo/pulls/273) - A routing system is now passed with the `provider.Online` option, by default the system run in offline mode (push stuff onto the queue). - - When using `provider.Online` calling the `.Run` method is not required anymore, the background worker is implicitely started in the background by `provider.New`. + - When using `provider.Online` calling the `.Run` method is not required anymore, the background worker is implicitly started in the background by `provider.New`. - You do not have to pass a queue anymore, you pass a `datastore.Datastore` exclusively. - 🛠 `provider.NewOfflineProvider` has been renamed to `provider.NewNoopProvider` to show more clearly that is does nothing. 
(https://github.com/ipfs/boxo/pulls/273) - 🛠 `provider.Provider` and `provider.Reprovider` has been merged under one `provider.System`. (https://github.com/ipfs/boxo/pulls/273) diff --git a/README.md b/README.md index eec46e0e3..bd824f512 100644 --- a/README.md +++ b/README.md @@ -62,6 +62,7 @@ Boxo powers [Kubo](https://github.com/ipfs/kubo), which is [the most popular IPF so its code has been battle-tested on the IPFS network for years, and is well-understood by the community. ### Motivation + **TL;DR** The goal of this repo is to help people build things. Previously users struggled to find existing useful code or to figure out how to use what they did find. We observed many running Kubo and using its HTTP RPC API. This repo aims to do better. We're taking the libraries that many were already effectively relying on in production and making them more easily discoverable and usable. The maintainers primarily aim to help people trying to build with IPFS in Go that were previously either giving up or relying on the [Kubo HTTP RPC API](https://docs.ipfs.tech/reference/kubo/rpc/). Some of these people will end up being better served by IPFS tooling in other languages (e.g., Javascript, Rust, Java, Python), but for those who are either looking to write in Go or to leverage the set of IPFS tooling we already have in Go we’d like to make their lives easier. @@ -73,6 +74,7 @@ Boxo is not exhaustive nor comprehensive--there are plenty of useful IPFS protoc More details can also be found in the [Rationale FAQ](./docs/FAQ.md#rationale-faq). ## Scope + ### What kind of components does Boxo have? Boxo includes high-quality components useful for interacting with IPFS protocols, public and private IPFS networks, and content-addressed data, such as: @@ -86,20 +88,23 @@ Boxo includes high-quality components useful for interacting with IPFS protocols Boxo aims to provide a cohesive interface into these components. Note that not all of the underlying components necessarily reside in this respository. ### Does Boxo == IPFS? -No. This repo houses some IPFS functionality written in Go that has been useful in practice, and is maintained by a group that has long term commitments to the IPFS project -### Is everything related to IPFS in the Go ecosystem in this repo? +No. This repo houses some IPFS functionality written in Go that has been useful in practice, and is maintained by a group that has long term commitments to the IPFS project -No. Not everything related to IPFS is intended to be in Boxo. View it as a starter toolbox (potentially among multiple). If you’d like to build an IPFS implementation with Go, here are some tools you might want that are maintained by a group that has long term commitments to the IPFS project. There are certainly repos that others maintain that aren't included here (e.g., ipfs/go-car) which are still useful to IPFS implementations. It's expected and fine for new IPFS functionality to be developed that won't be part of Boxo. +### Is everything related to IPFS in the Go ecosystem in this repo? +No. Not everything related to IPFS is intended to be in Boxo. View it as a starter toolbox (potentially among multiple). If you’d like to build an IPFS implementation with Go, here are some tools you might want that are maintained by a group that has long term commitments to the IPFS project. There are certainly repos that others maintain that aren't included here (e.g., ipfs/go-car) which are still useful to IPFS implementations. 
It's expected and fine for new IPFS functionality to be developed that won't be part of Boxo. ## Consuming + ### Getting started + See [examples](./examples/README.md). If you are migrating to Boxo, see [Migrating to Boxo](#migrating-to-boxo). ### Migrating to Boxo + Many Go modules under github.com/ipfs have moved here. Boxo provides a tool to ease this migration, which does most of the work for you: * `cd` into the root directory of your module (where the `go.mod` file is) @@ -116,10 +121,13 @@ We recommend upgrading to v0.8.0 first, and _then_ upgrading to the latest Boxo If you encounter any challenges, please [open an issue](https://github.com/ipfs/boxo/issues/new/choose) and Boxo maintainers will help you. ### Deprecations & Breaking Changes + See [RELEASE.md](./RELEASE.md). ## Development + ### Should I add my IPFS component to Boxo? + We happily accept external contributions! However, Boxo maintains a high quality bar, so code accepted into Boxo must meet some minimum maintenance criteria: * Actively maintained @@ -137,6 +145,7 @@ We happily accept external contributions! However, Boxo maintains a high quality If you have some experimental component that you think would benefit the IPFS community, we suggest you build the component in your own repository until it's clear that there's community demand for it, and then open an issue/PR in this repository to discuss including it in Boxo. ### Release Process + See [RELEASE.md](./RELEASE.md). ### Why is the code coverage so bad? @@ -144,30 +153,38 @@ See [RELEASE.md](./RELEASE.md). The code coverage of this repo is not currently representative of the actual test coverage of this code. Much of the code in this repo is currently covered by integration tests in [Kubo](https://github.com/ipfs/kubo). We are in the process of moving those tests here, and as that continues the code coverage will significantly increase. ## General + ### Help If you have questions, feel free to open an issue. You can also find the Boxo maintainers in [Filecoin Slack](https://filecoin.io/slack/) at #Boxo-maintainers. (If you would like to engage via IPFS Discord or ipfs.io Matrix, please drop into the #ipfs-implementers channel/room or file an issue, and we'll get bridging from #Boxo-maintainers to these other chat platforms.) ### What is the response time for issues or PRs filed? -TODO: fill this in. New issues and PRs to this repo are usually looked at on a weekly basis as part of [Kubo triage](https://pl-strflt.notion.site/Kubo-Issue-Triage-Notes-7d4983e8cf294e07b3cc51b0c60ede9a). + +New issues and PRs to this repo are usually looked at on a weekly basis as part of [Kubo triage](https://pl-strflt.notion.site/Kubo-Issue-Triage-Notes-7d4983e8cf294e07b3cc51b0c60ede9a). However, the response time may vary. ### What are some projects that depend on this project? -The exhaustive list is https://github.com/ipfs/boxo/network/dependents. Some notable projects include: + +The exhaustive list is https://github.com/ipfs/boxo/network/dependents. Some notable projects include: + 1. [Kubo](https://github.com/ipfs/kubo), an IPFS implementation in Go 2. [Lotus](https://github.com/filecoin-project/lotus), a Filecoin implementation in Go -3. [Bifrost Gateway](https://github.com/ipfs/bifrost-gateway), a dedicated IPFS gateway +6. [rainbow](https://github.com/ipfs/rainbow), a specialized IPFS gateway 4. [ipfs-check](https://github.com/ipfs-shipyard/ipfs-check), checks IPFS data availability +5. 
[someguy](https://github.com/ipfs-shipyard/someguy), a dedicated Delegated Routing V1 server and client +3. [Bifrost Gateway](https://github.com/ipfs/bifrost-gateway), a dedicated IPFS Gateway daemon backed by a remote datastore ### Governance and Access -See [CODEOWNERS](./docs/CODEOWNERS) for the current maintainers list. Governance for graduating additional maintainers hasn't been established. Repo permissions are all managed through [ipfs/github-mgmt](https://github.com/ipfs/github-mgmt). + +See [CODEOWNERS](./docs/CODEOWNERS) for the current maintainers list. Governance for graduating additional maintainers hasn't been established. Repo permissions are all managed through [ipfs/github-mgmt](https://github.com/ipfs/github-mgmt). ### Why is this named "Boxo"? + See https://github.com/ipfs/boxo/issues/215. ### Additional Docs & FAQs + See [the wiki](https://github.com/ipfs/boxo/wiki). ### License [SPDX-License-Identifier: Apache-2.0 OR MIT](LICENSE.md) - diff --git a/RELEASE.md b/RELEASE.md index deadd7231..3d6ac8400 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -27,7 +27,7 @@ The amount of backporting of a fix depends on the severity of the issue and the As a result, Boxo maintainers recommend that consumers stay up-to-date with Boxo releases. ### Go Compatibility -At any given point, the Go team supports only the latest two versions of Go released (see https://go.dev/doc/devel/release). Boxo maintainers will strive to maintain compatibilty with the older of the two supported versions, so that Boxo is also compatible with the latest two versions of Go. +At any given point, the Go team supports only the latest two versions of Go released (see https://go.dev/doc/devel/release). Boxo maintainers will strive to maintain compatibility with the older of the two supported versions, so that Boxo is also compatible with the latest two versions of Go. ### Release Criteria Boxo releases occur _at least_ on every Kubo release. Releases can also be initiated on-demand, regardless of Kubo's release cadence, whenever there are significant changes (new features, refactorings, deprecations, etc.). diff --git a/bitswap/client/client.go b/bitswap/client/client.go index 854d030d1..aa9ab78fa 100644 --- a/bitswap/client/client.go +++ b/bitswap/client/client.go @@ -44,14 +44,21 @@ var log = logging.Logger("bitswap-client") // bitswap instances type Option func(*Client) -// ProviderSearchDelay overwrites the global provider search delay +// ProviderSearchDelay sets the initial dely before triggering a provider +// search to find more peers and broadcast the want list. It also partially +// controls re-broadcasts delay when the session idles (does not receive any +// blocks), but these have back-off logic to increase the interval. See +// [defaults.ProvSearchDelay] for the default. func ProviderSearchDelay(newProvSearchDelay time.Duration) Option { return func(bs *Client) { bs.provSearchDelay = newProvSearchDelay } } -// RebroadcastDelay overwrites the global provider rebroadcast delay +// RebroadcastDelay sets a custom delay for periodic search of a random want. +// When the value ellapses, a random CID from the wantlist is chosen and the +// client attempts to find more peers for it and sends them the single want. +// [defaults.RebroadcastDelay] for the default. 
func RebroadcastDelay(newRebroadcastDelay delay.D) Option { return func(bs *Client) { bs.rebroadcastDelay = newRebroadcastDelay @@ -79,6 +86,19 @@ func WithBlockReceivedNotifier(brn BlockReceivedNotifier) Option { } } +// WithoutDuplicatedBlockStats disable collecting counts of duplicated blocks +// received. This counter requires triggering a blockstore.Has() call for +// every block received by launching goroutines in parallel. In the worst case +// (no caching/blooms etc), this is an expensive call for the datastore to +// answer. In a normal case (caching), this has the power of evicting a +// different block from intermediary caches. In the best case, it doesn't +// affect performance. Use if this stat is not relevant. +func WithoutDuplicatedBlockStats() Option { + return func(bs *Client) { + bs.skipDuplicatedBlocksStats = true + } +} + type BlockReceivedNotifier interface { // ReceivedBlocks notifies the decision engine that a peer is well-behaving // and gave us useful data, potentially increasing its score and making us @@ -155,7 +175,7 @@ func New(parent context.Context, network bsnet.BitSwapNetwork, bstore blockstore dupMetric: bmetrics.DupHist(ctx), allMetric: bmetrics.AllHist(ctx), provSearchDelay: defaults.ProvSearchDelay, - rebroadcastDelay: delay.Fixed(time.Minute), + rebroadcastDelay: delay.Fixed(defaults.RebroadcastDelay), simulateDontHavesOnTimeout: true, } @@ -226,6 +246,9 @@ type Client struct { // whether we should actually simulate dont haves on request timeout simulateDontHavesOnTimeout bool + + // dupMetric will stay at 0 + skipDuplicatedBlocksStats bool } type counters struct { @@ -373,14 +396,17 @@ func (bs *Client) updateReceiveCounters(blocks []blocks.Block) { // Check which blocks are in the datastore // (Note: any errors from the blockstore are simply logged out in // blockstoreHas()) - blocksHas := bs.blockstoreHas(blocks) + var blocksHas []bool + if !bs.skipDuplicatedBlocksStats { + blocksHas = bs.blockstoreHas(blocks) + } bs.counterLk.Lock() defer bs.counterLk.Unlock() // Do some accounting for each block for i, b := range blocks { - has := blocksHas[i] + has := (blocksHas != nil) && blocksHas[i] blkLen := len(b.RawData()) bs.allMetric.Observe(float64(blkLen)) diff --git a/bitswap/client/internal/blockpresencemanager/blockpresencemanager_test.go b/bitswap/client/internal/blockpresencemanager/blockpresencemanager_test.go index b0616e4e3..b977c28ff 100644 --- a/bitswap/client/internal/blockpresencemanager/blockpresencemanager_test.go +++ b/bitswap/client/internal/blockpresencemanager/blockpresencemanager_test.go @@ -218,7 +218,7 @@ func TestAllPeersDoNotHaveBlock(t *testing.T) { {[]peer.ID{p1}, []cid.Cid{c2}, []cid.Cid{}}, {[]peer.ID{p2}, []cid.Cid{c2}, []cid.Cid{c2}}, - // p0 recieved DONT_HAVE for c1 & c2 (but not for c0) + // p0 received DONT_HAVE for c1 & c2 (but not for c0) {[]peer.ID{p0}, []cid.Cid{c0, c1, c2}, []cid.Cid{c1, c2}}, {[]peer.ID{p0, p1}, []cid.Cid{c0, c1, c2}, []cid.Cid{}}, // Both p0 and p2 received DONT_HAVE for c2 diff --git a/bitswap/client/internal/getter/getter.go b/bitswap/client/internal/getter/getter.go index 713394b08..822d319b7 100644 --- a/bitswap/client/internal/getter/getter.go +++ b/bitswap/client/internal/getter/getter.go @@ -21,7 +21,7 @@ type GetBlocksFunc func(context.Context, []cid.Cid) (<-chan blocks.Block, error) // SyncGetBlock takes a block cid and an async function for getting several // blocks that returns a channel, and uses that function to return the -// block syncronously. +// block synchronously. 
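The three client options touched in this hunk (ProviderSearchDelay, RebroadcastDelay and the new WithoutDuplicatedBlockStats) are all passed to the client constructor. A minimal usage sketch, not part of this patch — the network/blockstore wiring, the helper name, and the chosen durations are assumptions for illustration; only the option names and signatures come from this changeset:

    // Hypothetical consumer code; only the option names come from this changeset.
    import (
        "context"
        "time"

        bsclient "github.com/ipfs/boxo/bitswap/client"
        bsnet "github.com/ipfs/boxo/bitswap/network"
        blockstore "github.com/ipfs/boxo/blockstore"
        delay "github.com/ipfs/go-ipfs-delay"
    )

    func newTunedClient(ctx context.Context, net bsnet.BitSwapNetwork, bstore blockstore.Blockstore) *bsclient.Client {
        return bsclient.New(ctx, net, bstore,
            bsclient.ProviderSearchDelay(2*time.Second),           // trigger the provider search after 2s without blocks
            bsclient.RebroadcastDelay(delay.Fixed(2*time.Minute)), // probe a random want from the wantlist every 2 minutes
            bsclient.WithoutDuplicatedBlockStats(),                // skip the blockstore.Has() duplicate-block accounting
        )
    }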
func SyncGetBlock(p context.Context, k cid.Cid, gb GetBlocksFunc) (blocks.Block, error) { p, span := internal.StartSpan(p, "Getter.SyncGetBlock") defer span.End() diff --git a/bitswap/client/internal/providerquerymanager/providerquerymanager_test.go b/bitswap/client/internal/providerquerymanager/providerquerymanager_test.go index afdd19595..52447e2c1 100644 --- a/bitswap/client/internal/providerquerymanager/providerquerymanager_test.go +++ b/bitswap/client/internal/providerquerymanager/providerquerymanager_test.go @@ -281,7 +281,7 @@ func TestRateLimitingRequests(t *testing.T) { defer fpn.queriesMadeMutex.Unlock() if fpn.queriesMade != maxInProcessRequests+1 { t.Logf("Queries made: %d\n", fpn.queriesMade) - t.Fatal("Did not make all seperate requests") + t.Fatal("Did not make all separate requests") } } diff --git a/bitswap/internal/defaults/defaults.go b/bitswap/internal/defaults/defaults.go index f9494a0da..f5511cc7a 100644 --- a/bitswap/internal/defaults/defaults.go +++ b/bitswap/internal/defaults/defaults.go @@ -33,4 +33,8 @@ const ( // FIXME: expose this in go-verifcid. MaximumHashLength = 128 MaximumAllowedCid = binary.MaxVarintLen64*4 + MaximumHashLength + + // RebroadcastDelay is the default delay to trigger broadcast of + // random CIDs in the wantlist. + RebroadcastDelay = time.Minute ) diff --git a/bitswap/metrics/metrics.go b/bitswap/metrics/metrics.go index b71923727..e40276842 100644 --- a/bitswap/metrics/metrics.go +++ b/bitswap/metrics/metrics.go @@ -14,11 +14,11 @@ var ( ) func DupHist(ctx context.Context) metrics.Histogram { - return metrics.NewCtx(ctx, "recv_dup_blocks_bytes", "Summary of duplicate data blocks recived").Histogram(metricsBuckets) + return metrics.NewCtx(ctx, "recv_dup_blocks_bytes", "Summary of duplicate data blocks received").Histogram(metricsBuckets) } func AllHist(ctx context.Context) metrics.Histogram { - return metrics.NewCtx(ctx, "recv_all_blocks_bytes", "Summary of all data blocks recived").Histogram(metricsBuckets) + return metrics.NewCtx(ctx, "recv_all_blocks_bytes", "Summary of all data blocks received").Histogram(metricsBuckets) } func SentHist(ctx context.Context) metrics.Histogram { diff --git a/bitswap/server/internal/decision/engine_test.go b/bitswap/server/internal/decision/engine_test.go index efc4408ff..b7382f817 100644 --- a/bitswap/server/internal/decision/engine_test.go +++ b/bitswap/server/internal/decision/engine_test.go @@ -138,7 +138,7 @@ func TestConsistentAccounting(t *testing.T) { t.Fatal("Inconsistent book-keeping. Strategies don't agree") } - // Ensure sender didn't record receving anything. And that the receiver + // Ensure sender didn't record receiving anything. 
And that the receiver // didn't record sending anything if receiver.Engine.numBytesSentTo(sender.Peer) != 0 || sender.Engine.numBytesReceivedFrom(receiver.Peer) != 0 { t.Fatal("Bert didn't send bytes to Ernie") diff --git a/bitswap/server/server.go b/bitswap/server/server.go index f71d6f5f1..7feffd093 100644 --- a/bitswap/server/server.go +++ b/bitswap/server/server.go @@ -398,7 +398,7 @@ func (bs *Server) Stat() (Stat, error) { peers := bs.engine.Peers() peersStr := make([]string, len(peers)) for i, p := range peers { - peersStr[i] = p.Pretty() + peersStr[i] = p.String() } sort.Strings(peersStr) s.Peers = peersStr diff --git a/bootstrap/bootstrap.go b/bootstrap/bootstrap.go new file mode 100644 index 000000000..303405f14 --- /dev/null +++ b/bootstrap/bootstrap.go @@ -0,0 +1,371 @@ +package bootstrap + +import ( + "context" + "errors" + "io" + "math/rand" + "sync" + "sync/atomic" + "time" + + logging "github.com/ipfs/go-log/v2" + "github.com/jbenet/goprocess" + goprocessctx "github.com/jbenet/goprocess/context" + periodicproc "github.com/jbenet/goprocess/periodic" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/peerstore" + "github.com/libp2p/go-libp2p/core/routing" +) + +var log = logging.Logger("bootstrap") + +// ErrNotEnoughBootstrapPeers signals that we do not have enough bootstrap +// peers to bootstrap correctly. +var ErrNotEnoughBootstrapPeers = errors.New("not enough bootstrap peers to bootstrap") + +// BootstrapConfig specifies parameters used in an IpfsNode's network +// bootstrapping process. +type BootstrapConfig struct { + // MinPeerThreshold governs whether to bootstrap more connections. If the + // node has less open connections than this number, it will open connections + // to the bootstrap nodes. From there, the routing system should be able + // to use the connections to the bootstrap nodes to connect to even more + // peers. Routing systems like the IpfsDHT do so in their own Bootstrap + // process, which issues random queries to find more peers. + MinPeerThreshold int + + // Period governs the periodic interval at which the node will + // attempt to bootstrap. The bootstrap process is not very expensive, so + // this threshold can afford to be small (<=30s). + Period time.Duration + + // ConnectionTimeout determines how long to wait for a bootstrap + // connection attempt before cancelling it. + ConnectionTimeout time.Duration + + // BootstrapPeers is a function that returns a set of bootstrap peers + // for the bootstrap process to use. This makes it possible for clients + // to control the peers the process uses at any moment. + BootstrapPeers func() []peer.AddrInfo + + // BackupBootstrapInterval governs the periodic interval at which the node will + // attempt to save connected nodes to use as temporary bootstrap peers. + BackupBootstrapInterval time.Duration + + // MaxBackupBootstrapSize controls the maximum number of peers we're saving + // as backup bootstrap peers. + MaxBackupBootstrapSize int + + saveBackupBootstrapPeers func(context.Context, []peer.AddrInfo) + loadBackupBootstrapPeers func(context.Context) []peer.AddrInfo +} + +// DefaultBootstrapConfig specifies default sane parameters for bootstrapping. 
+var DefaultBootstrapConfig = BootstrapConfig{ + MinPeerThreshold: 4, + Period: 30 * time.Second, + ConnectionTimeout: (30 * time.Second) / 3, // Period / 3 + BackupBootstrapInterval: 1 * time.Hour, + MaxBackupBootstrapSize: 20, +} + +// BootstrapConfigWithPeers creates a default BootstrapConfig configured with +// the specified peers, and optional functions to load and save backup peers. +func BootstrapConfigWithPeers(pis []peer.AddrInfo, options ...func(*BootstrapConfig)) BootstrapConfig { + cfg := DefaultBootstrapConfig + cfg.BootstrapPeers = func() []peer.AddrInfo { + return pis + } + for _, opt := range options { + opt(&cfg) + } + return cfg +} + +// WithBackupPeers configures functions to load and save backup bootstrap peers. +func WithBackupPeers(load func(context.Context) []peer.AddrInfo, save func(context.Context, []peer.AddrInfo)) func(*BootstrapConfig) { + if save == nil && load != nil || save != nil && load == nil { + panic("both load and save backup bootstrap peers functions must be defined") + } + return func(cfg *BootstrapConfig) { + cfg.loadBackupBootstrapPeers = load + cfg.saveBackupBootstrapPeers = save + } +} + +// BackupPeers returns the load and save backup peers functions. +func (cfg *BootstrapConfig) BackupPeers() (func(context.Context) []peer.AddrInfo, func(context.Context, []peer.AddrInfo)) { + return cfg.loadBackupBootstrapPeers, cfg.saveBackupBootstrapPeers +} + +// SetBackupPeers sets the load and save backup peers functions. +func (cfg *BootstrapConfig) SetBackupPeers(load func(context.Context) []peer.AddrInfo, save func(context.Context, []peer.AddrInfo)) { + opt := WithBackupPeers(load, save) + opt(cfg) +} + +// Bootstrap kicks off IpfsNode bootstrapping. This function will periodically +// check the number of open connections and -- if there are too few -- initiate +// connections to well-known bootstrap peers. It also kicks off subsystem +// bootstrapping (i.e. routing). +func Bootstrap(id peer.ID, host host.Host, rt routing.Routing, cfg BootstrapConfig) (io.Closer, error) { + // make a signal to wait for one bootstrap round to complete. + doneWithRound := make(chan struct{}) + + if len(cfg.BootstrapPeers()) == 0 { + // We *need* to bootstrap but we have no bootstrap peers + // configured *at all*, inform the user. + log.Warn("no bootstrap nodes configured: go-ipfs may have difficulty connecting to the network") + } + + // the periodic bootstrap function -- the connection supervisor + periodic := func(worker goprocess.Process) { + ctx := goprocessctx.OnClosingContext(worker) + + if err := bootstrapRound(ctx, host, cfg); err != nil { + log.Debugf("%s bootstrap error: %s", id, err) + } + + // Exit the first call (triggered independently by `proc.Go`, not `Tick`) + // only after being done with the *single* Routing.Bootstrap call. Following + // periodic calls (`Tick`) will not block on this. + <-doneWithRound + } + + // kick off the node's periodic bootstrapping + proc := periodicproc.Tick(cfg.Period, periodic) + proc.Go(periodic) // run one right now. + + // kick off Routing.Bootstrap + if rt != nil { + ctx := goprocessctx.OnClosingContext(proc) + if err := rt.Bootstrap(ctx); err != nil { + proc.Close() + return nil, err + } + } + + doneWithRound <- struct{}{} + close(doneWithRound) // it no longer blocks periodic + + // If loadBackupBootstrapPeers is not nil then saveBackupBootstrapPeers + // must also not be nil. 
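Taken together, BootstrapConfigWithPeers, WithBackupPeers and Bootstrap form the public surface of this new package. A minimal wiring sketch, not part of this patch — the host, router, peer list and persistence callbacks are assumptions for illustration; only the exported bootstrap functions come from this file:

    // Hypothetical caller code; only the exported bootstrap identifiers come from this file.
    import (
        "context"
        "io"

        "github.com/ipfs/boxo/bootstrap"
        "github.com/libp2p/go-libp2p/core/host"
        "github.com/libp2p/go-libp2p/core/peer"
        "github.com/libp2p/go-libp2p/core/routing"
    )

    func startBootstrap(h host.Host, rt routing.Routing, pis []peer.AddrInfo,
        load func(context.Context) []peer.AddrInfo,
        save func(context.Context, []peer.AddrInfo),
    ) (io.Closer, error) {
        cfg := bootstrap.BootstrapConfigWithPeers(pis, bootstrap.WithBackupPeers(load, save))
        cfg.MinPeerThreshold = 8 // dial out whenever open connections drop below 8
        return bootstrap.Bootstrap(h.ID(), h, rt, cfg)
    }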
+ if cfg.loadBackupBootstrapPeers != nil { + startSavePeersAsTemporaryBootstrapProc(cfg, host, proc) + } + + return proc, nil +} + +// Aside of the main bootstrap process we also run a secondary one that saves +// connected peers as a backup measure if we can't connect to the official +// bootstrap ones. These peers will serve as *temporary* bootstrap nodes. +func startSavePeersAsTemporaryBootstrapProc(cfg BootstrapConfig, host host.Host, bootstrapProc goprocess.Process) { + savePeersFn := func(worker goprocess.Process) { + ctx := goprocessctx.OnClosingContext(worker) + + if err := saveConnectedPeersAsTemporaryBootstrap(ctx, host, cfg); err != nil { + log.Debugf("saveConnectedPeersAsTemporaryBootstrap error: %s", err) + } + } + savePeersProc := periodicproc.Tick(cfg.BackupBootstrapInterval, savePeersFn) + + // When the main bootstrap process ends also terminate the 'save connected + // peers' ones. Coupling the two seems the easiest way to handle this backup + // process without additional complexity. + go func() { + <-bootstrapProc.Closing() + savePeersProc.Close() + }() + + // Run the first round now (after the first bootstrap process has finished) + // as the SavePeersPeriod can be much longer than bootstrap. + savePeersProc.Go(savePeersFn) +} + +func saveConnectedPeersAsTemporaryBootstrap(ctx context.Context, host host.Host, cfg BootstrapConfig) error { + // Randomize the list of connected peers, we don't prioritize anyone. + connectedPeers := randomizeList(host.Network().Peers()) + + bootstrapPeers := cfg.BootstrapPeers() + backupPeers := make([]peer.AddrInfo, 0, cfg.MaxBackupBootstrapSize) + foundPeers := make(map[peer.ID]struct{}, cfg.MaxBackupBootstrapSize+len(bootstrapPeers)) + + // Don't record bootstrap peers + for _, b := range bootstrapPeers { + foundPeers[b.ID] = struct{}{} + } + + // Choose peers to save and filter out the ones that are already bootstrap nodes. + for _, p := range connectedPeers { + if _, found := foundPeers[p]; found { + continue + } + foundPeers[p] = struct{}{} + + backupPeers = append(backupPeers, peer.AddrInfo{ + ID: p, + Addrs: host.Network().Peerstore().Addrs(p), + }) + + if len(backupPeers) >= cfg.MaxBackupBootstrapSize { + break + } + } + + // If we didn't reach the target number use previously stored connected peers. + if len(backupPeers) < cfg.MaxBackupBootstrapSize { + oldSavedPeers := cfg.loadBackupBootstrapPeers(ctx) + log.Debugf("missing %d peers to reach backup bootstrap target of %d, trying from previous list of %d saved peers", + cfg.MaxBackupBootstrapSize-len(backupPeers), cfg.MaxBackupBootstrapSize, len(oldSavedPeers)) + + // Add some of the old saved peers. Ensure we don't duplicate them. + for _, p := range oldSavedPeers { + if _, found := foundPeers[p.ID]; found { + continue + } + foundPeers[p.ID] = struct{}{} + + backupPeers = append(backupPeers, p) + + if len(backupPeers) >= cfg.MaxBackupBootstrapSize { + break + } + } + } + + cfg.saveBackupBootstrapPeers(ctx, backupPeers) + log.Debugf("saved %d peers (of %d target) as bootstrap backup in the config", len(backupPeers), cfg.MaxBackupBootstrapSize) + return nil +} + +// Connect to as many peers needed to reach the BootstrapConfig.MinPeerThreshold. +// Peers can be original bootstrap or temporary ones (drawn from a list of +// persisted previously connected peers). +func bootstrapRound(ctx context.Context, host host.Host, cfg BootstrapConfig) error { + ctx, cancel := context.WithTimeout(ctx, cfg.ConnectionTimeout) + defer cancel() + id := host.ID() + + // get bootstrap peers from config. 
retrieving them here makes + // sure we remain observant of changes to client configuration. + peers := cfg.BootstrapPeers() + // determine how many bootstrap connections to open + connected := host.Network().Peers() + if len(connected) >= cfg.MinPeerThreshold { + log.Debugf("%s core bootstrap skipped -- connected to %d (> %d) nodes", + id, len(connected), cfg.MinPeerThreshold) + return nil + } + numToDial := cfg.MinPeerThreshold - len(connected) // numToDial > 0 + + if len(peers) > 0 { + numToDial -= int(peersConnect(ctx, host, peers, numToDial, true)) + if numToDial <= 0 { + return nil + } + } + + if cfg.loadBackupBootstrapPeers == nil { + log.Debugf("not enough bootstrap peers to fill the remaining target of %d connections", numToDial) + return ErrNotEnoughBootstrapPeers + } + + log.Debugf("not enough bootstrap peers to fill the remaining target of %d connections, trying backup list", numToDial) + + tempBootstrapPeers := cfg.loadBackupBootstrapPeers(ctx) + if len(tempBootstrapPeers) > 0 { + numToDial -= int(peersConnect(ctx, host, tempBootstrapPeers, numToDial, false)) + if numToDial <= 0 { + return nil + } + } + + log.Debugf("tried both original bootstrap peers and temporary ones but still missing target of %d connections", numToDial) + + return ErrNotEnoughBootstrapPeers +} + +// Attempt to make `needed` connections from the `availablePeers` list. Mark +// peers as either `permanent` or temporary when adding them to the Peerstore. +// Return the number of connections completed. We eagerly over-connect in parallel, +// so we might connect to more than needed. +// (We spawn as many routines and attempt connections as the number of availablePeers, +// but this list comes from restricted sets of original or temporary bootstrap +// nodes which will keep it under a sane value.) +func peersConnect(ctx context.Context, ph host.Host, availablePeers []peer.AddrInfo, needed int, permanent bool) uint64 { + peers := randomizeList(availablePeers) + + // Monitor the number of connections and stop if we reach the target. + var connected uint64 + ctx, cancel := context.WithCancel(ctx) + defer cancel() + go func() { + for { + select { + case <-ctx.Done(): + return + case <-time.After(1 * time.Second): + if int(atomic.LoadUint64(&connected)) >= needed { + cancel() + return + } + } + } + }() + + var wg sync.WaitGroup + for _, p := range peers { + + // performed asynchronously because when performed synchronously, if + // one `Connect` call hangs, subsequent calls are more likely to + // fail/abort due to an expiring context. + // Also, performed asynchronously for dial speed. + + if int(atomic.LoadUint64(&connected)) >= needed { + cancel() + break + } + + wg.Add(1) + go func(p peer.AddrInfo) { + defer wg.Done() + + // Skip addresses belonging to a peer we're already connected to. + // (Not a guarantee but a best-effort policy.) + if ph.Network().Connectedness(p.ID) == network.Connected { + return + } + log.Debugf("%s bootstrapping to %s", ph.ID(), p.ID) + + if err := ph.Connect(ctx, p); err != nil { + if ctx.Err() != context.Canceled { + log.Debugf("failed to bootstrap with %v: %s", p.ID, err) + } + return + } + if permanent { + // We're connecting to an original bootstrap peer, mark it as + // a permanent address (Connect will register it as TempAddrTTL). 
+ ph.Peerstore().AddAddrs(p.ID, p.Addrs, peerstore.PermanentAddrTTL) + } + + log.Infof("bootstrapped with %v", p.ID) + atomic.AddUint64(&connected, 1) + }(p) + } + wg.Wait() + + return connected +} + +func randomizeList[T any](in []T) []T { + out := make([]T, len(in)) + for i, val := range rand.Perm(len(in)) { + out[i] = in[val] + } + return out +} diff --git a/bootstrap/bootstrap_test.go b/bootstrap/bootstrap_test.go new file mode 100644 index 000000000..d933379d4 --- /dev/null +++ b/bootstrap/bootstrap_test.go @@ -0,0 +1,139 @@ +package bootstrap + +import ( + "context" + "crypto/rand" + "reflect" + "testing" + "time" + + "github.com/libp2p/go-libp2p" + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/test" +) + +func TestRandomizeAddressList(t *testing.T) { + var ps []peer.AddrInfo + sizeofSlice := 10 + for i := 0; i < sizeofSlice; i++ { + pid, err := test.RandPeerID() + if err != nil { + t.Fatal(err) + } + + ps = append(ps, peer.AddrInfo{ID: pid}) + } + out := randomizeList(ps) + if len(out) != len(ps) { + t.Fail() + } +} + +func TestLoadAndSaveOptions(t *testing.T) { + loadFunc := func(_ context.Context) []peer.AddrInfo { return nil } + saveFunc := func(_ context.Context, _ []peer.AddrInfo) {} + + bootCfg := BootstrapConfigWithPeers(nil, WithBackupPeers(loadFunc, saveFunc)) + load, save := bootCfg.BackupPeers() + if load == nil { + t.Fatal("load function not assigned") + } + if reflect.ValueOf(load).Pointer() != reflect.ValueOf(loadFunc).Pointer() { + t.Fatal("load not assigned correct function") + } + if save == nil { + t.Fatal("save function not assigned") + } + if reflect.ValueOf(save).Pointer() != reflect.ValueOf(saveFunc).Pointer() { + t.Fatal("save not assigned correct function") + } + + assertPanics(t, "with only load func", func() { + BootstrapConfigWithPeers(nil, WithBackupPeers(loadFunc, nil)) + }) + + assertPanics(t, "with only save func", func() { + BootstrapConfigWithPeers(nil, WithBackupPeers(nil, saveFunc)) + }) + + bootCfg = BootstrapConfigWithPeers(nil, WithBackupPeers(nil, nil)) + load, save = bootCfg.BackupPeers() + if load != nil || save != nil { + t.Fatal("load and save functions should both be nil") + } +} + +func TestSetBackupPeers(t *testing.T) { + loadFunc := func(_ context.Context) []peer.AddrInfo { return nil } + saveFunc := func(_ context.Context, _ []peer.AddrInfo) {} + + bootCfg := DefaultBootstrapConfig + bootCfg.SetBackupPeers(loadFunc, saveFunc) + load, save := bootCfg.BackupPeers() + if load == nil { + t.Fatal("load function not assigned") + } + if reflect.ValueOf(load).Pointer() != reflect.ValueOf(loadFunc).Pointer() { + t.Fatal("load not assigned correct function") + } + if save == nil { + t.Fatal("save function not assigned") + } + if reflect.ValueOf(save).Pointer() != reflect.ValueOf(saveFunc).Pointer() { + t.Fatal("save not assigned correct function") + } + + assertPanics(t, "with only load func", func() { + bootCfg.SetBackupPeers(loadFunc, nil) + }) + + assertPanics(t, "with only save func", func() { + bootCfg.SetBackupPeers(nil, saveFunc) + }) + + bootCfg.SetBackupPeers(nil, nil) + load, save = bootCfg.BackupPeers() + if load != nil || save != nil { + t.Fatal("load and save functions should both be nil") + } +} + +func TestNoTempPeersLoadAndSave(t *testing.T) { + period := 500 * time.Millisecond + bootCfg := BootstrapConfigWithPeers(nil) + bootCfg.MinPeerThreshold = 2 + bootCfg.Period = period + + priv, pub, err := crypto.GenerateEd25519Key(rand.Reader) + if err != nil { + 
t.Fatal(err) + } + peerID, err := peer.IDFromPublicKey(pub) + if err != nil { + t.Fatal(err) + } + p2pHost, err := libp2p.New(libp2p.Identity(priv)) + if err != nil { + t.Fatal(err) + } + + bootstrapper, err := Bootstrap(peerID, p2pHost, nil, bootCfg) + if err != nil { + t.Fatal(err) + } + + time.Sleep(4 * period) + bootstrapper.Close() + +} + +func assertPanics(t *testing.T, name string, f func()) { + defer func() { + if r := recover(); r == nil { + t.Errorf("%s: did not panic as expected", name) + } + }() + + f() +} diff --git a/coreiface/block.go b/coreiface/block.go deleted file mode 100644 index dbe31e9f8..000000000 --- a/coreiface/block.go +++ /dev/null @@ -1,38 +0,0 @@ -package iface - -import ( - "context" - "io" - - path "github.com/ipfs/boxo/coreiface/path" - - "github.com/ipfs/boxo/coreiface/options" -) - -// BlockStat contains information about a block -type BlockStat interface { - // Size is the size of a block - Size() int - - // Path returns path to the block - Path() path.Resolved -} - -// BlockAPI specifies the interface to the block layer -type BlockAPI interface { - // Put imports raw block data, hashing it using specified settings. - Put(context.Context, io.Reader, ...options.BlockPutOption) (BlockStat, error) - - // Get attempts to resolve the path and return a reader for data in the block - Get(context.Context, path.Path) (io.Reader, error) - - // Rm removes the block specified by the path from local blockstore. - // By default an error will be returned if the block can't be found locally. - // - // NOTE: If the specified block is pinned it won't be removed and no error - // will be returned - Rm(context.Context, path.Path, ...options.BlockRmOption) error - - // Stat returns information on - Stat(context.Context, path.Path) (BlockStat, error) -} diff --git a/coreiface/coreapi.go b/coreiface/coreapi.go deleted file mode 100644 index 7276a3f60..000000000 --- a/coreiface/coreapi.go +++ /dev/null @@ -1,60 +0,0 @@ -// Package iface defines IPFS Core API which is a set of interfaces used to -// interact with IPFS nodes. 
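As the changelog entry earlier in this diff notes, these interfaces are not gone: they moved into Kubo. A sketch of the import change downstream code would make, assuming Kubo's module path:

    import (
        // before (removed from boxo in this changeset):
        //   iface "github.com/ipfs/boxo/coreiface"
        // after (now maintained in Kubo):
        iface "github.com/ipfs/kubo/core/coreiface"
    )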
-package iface - -import ( - "context" - - path "github.com/ipfs/boxo/coreiface/path" - - "github.com/ipfs/boxo/coreiface/options" - - ipld "github.com/ipfs/go-ipld-format" -) - -// CoreAPI defines an unified interface to IPFS for Go programs -type CoreAPI interface { - // Unixfs returns an implementation of Unixfs API - Unixfs() UnixfsAPI - - // Block returns an implementation of Block API - Block() BlockAPI - - // Dag returns an implementation of Dag API - Dag() APIDagService - - // Name returns an implementation of Name API - Name() NameAPI - - // Key returns an implementation of Key API - Key() KeyAPI - - // Pin returns an implementation of Pin API - Pin() PinAPI - - // Object returns an implementation of Object API - Object() ObjectAPI - - // Dht returns an implementation of Dht API - Dht() DhtAPI - - // Swarm returns an implementation of Swarm API - Swarm() SwarmAPI - - // PubSub returns an implementation of PubSub API - PubSub() PubSubAPI - - // Routing returns an implementation of Routing API - Routing() RoutingAPI - - // ResolvePath resolves the path using Unixfs resolver - ResolvePath(context.Context, path.Path) (path.Resolved, error) - - // ResolveNode resolves the path (if not resolved already) using Unixfs - // resolver, gets and returns the resolved Node - ResolveNode(context.Context, path.Path) (ipld.Node, error) - - // WithOptions creates new instance of CoreAPI based on this instance with - // a set of options applied - WithOptions(...options.ApiOption) (CoreAPI, error) -} diff --git a/coreiface/dag.go b/coreiface/dag.go deleted file mode 100644 index 3cc3aeb4d..000000000 --- a/coreiface/dag.go +++ /dev/null @@ -1,13 +0,0 @@ -package iface - -import ( - ipld "github.com/ipfs/go-ipld-format" -) - -// APIDagService extends ipld.DAGService -type APIDagService interface { - ipld.DAGService - - // Pinning returns special NodeAdder which recursively pins added nodes - Pinning() ipld.NodeAdder -} diff --git a/coreiface/dht.go b/coreiface/dht.go deleted file mode 100644 index 93027a406..000000000 --- a/coreiface/dht.go +++ /dev/null @@ -1,27 +0,0 @@ -package iface - -import ( - "context" - - "github.com/ipfs/boxo/coreiface/path" - - "github.com/ipfs/boxo/coreiface/options" - - "github.com/libp2p/go-libp2p/core/peer" -) - -// DhtAPI specifies the interface to the DHT -// Note: This API will likely get deprecated in near future, see -// https://github.com/ipfs/interface-ipfs-core/issues/249 for more context. -type DhtAPI interface { - // FindPeer queries the DHT for all of the multiaddresses associated with a - // Peer ID - FindPeer(context.Context, peer.ID) (peer.AddrInfo, error) - - // FindProviders finds peers in the DHT who can provide a specific value - // given a key. 
- FindProviders(context.Context, path.Path, ...options.DhtFindProvidersOption) (<-chan peer.AddrInfo, error) - - // Provide announces to the network that you are providing given values - Provide(context.Context, path.Path, ...options.DhtProvideOption) error -} diff --git a/coreiface/errors.go b/coreiface/errors.go deleted file mode 100644 index e0bd7805d..000000000 --- a/coreiface/errors.go +++ /dev/null @@ -1,10 +0,0 @@ -package iface - -import "errors" - -var ( - ErrIsDir = errors.New("this dag node is a directory") - ErrNotFile = errors.New("this dag node is not a regular file") - ErrOffline = errors.New("this action must be run in online mode, try running 'ipfs daemon' first") - ErrNotSupported = errors.New("operation not supported") -) diff --git a/coreiface/idfmt.go b/coreiface/idfmt.go deleted file mode 100644 index 80fd0f822..000000000 --- a/coreiface/idfmt.go +++ /dev/null @@ -1,19 +0,0 @@ -package iface - -import ( - "github.com/libp2p/go-libp2p/core/peer" - mbase "github.com/multiformats/go-multibase" -) - -func FormatKeyID(id peer.ID) string { - if s, err := peer.ToCid(id).StringOfBase(mbase.Base36); err != nil { - panic(err) - } else { - return s - } -} - -// FormatKey formats the given IPNS key in a canonical way. -func FormatKey(key Key) string { - return FormatKeyID(key.ID()) -} diff --git a/coreiface/key.go b/coreiface/key.go deleted file mode 100644 index 118fe2e4f..000000000 --- a/coreiface/key.go +++ /dev/null @@ -1,43 +0,0 @@ -package iface - -import ( - "context" - - "github.com/ipfs/boxo/coreiface/path" - - "github.com/ipfs/boxo/coreiface/options" - - "github.com/libp2p/go-libp2p/core/peer" -) - -// Key specifies the interface to Keys in KeyAPI Keystore -type Key interface { - // Key returns key name - Name() string - - // Path returns key path - Path() path.Path - - // ID returns key PeerID - ID() peer.ID -} - -// KeyAPI specifies the interface to Keystore -type KeyAPI interface { - // Generate generates new key, stores it in the keystore under the specified - // name and returns a base58 encoded multihash of it's public key - Generate(ctx context.Context, name string, opts ...options.KeyGenerateOption) (Key, error) - - // Rename renames oldName key to newName. Returns the key and whether another - // key was overwritten, or an error - Rename(ctx context.Context, oldName string, newName string, opts ...options.KeyRenameOption) (Key, bool, error) - - // List lists keys stored in keystore - List(ctx context.Context) ([]Key, error) - - // Self returns the 'main' node key - Self(ctx context.Context) (Key, error) - - // Remove removes keys from keystore. Returns ipns path of the removed key - Remove(ctx context.Context, name string) (Key, error) -} diff --git a/coreiface/name.go b/coreiface/name.go deleted file mode 100644 index 8c3e8e89a..000000000 --- a/coreiface/name.go +++ /dev/null @@ -1,41 +0,0 @@ -package iface - -import ( - "context" - "errors" - - path "github.com/ipfs/boxo/coreiface/path" - "github.com/ipfs/boxo/ipns" - - "github.com/ipfs/boxo/coreiface/options" -) - -var ErrResolveFailed = errors.New("could not resolve name") - -type IpnsResult struct { - path.Path - Err error -} - -// NameAPI specifies the interface to IPNS. -// -// IPNS is a PKI namespace, where names are the hashes of public keys, and the -// private key enables publishing new (signed) values. In both publish and -// resolve, the default name used is the node's own PeerID, which is the hash of -// its public key. 
-// -// You can use .Key API to list and generate more names and their respective keys. -type NameAPI interface { - // Publish announces new IPNS name - Publish(ctx context.Context, path path.Path, opts ...options.NamePublishOption) (ipns.Name, error) - - // Resolve attempts to resolve the newest version of the specified name - Resolve(ctx context.Context, name string, opts ...options.NameResolveOption) (path.Path, error) - - // Search is a version of Resolve which outputs paths as they are discovered, - // reducing the time to first entry - // - // Note: by default, all paths read from the channel are considered unsafe, - // except the latest (last path in channel read buffer). - Search(ctx context.Context, name string, opts ...options.NameResolveOption) (<-chan IpnsResult, error) -} diff --git a/coreiface/object.go b/coreiface/object.go deleted file mode 100644 index d983fa49b..000000000 --- a/coreiface/object.go +++ /dev/null @@ -1,108 +0,0 @@ -package iface - -import ( - "context" - "io" - - path "github.com/ipfs/boxo/coreiface/path" - - "github.com/ipfs/boxo/coreiface/options" - - "github.com/ipfs/go-cid" - ipld "github.com/ipfs/go-ipld-format" -) - -// ObjectStat provides information about dag nodes -type ObjectStat struct { - // Cid is the CID of the node - Cid cid.Cid - - // NumLinks is number of links the node contains - NumLinks int - - // BlockSize is size of the raw serialized node - BlockSize int - - // LinksSize is size of the links block section - LinksSize int - - // DataSize is the size of data block section - DataSize int - - // CumulativeSize is size of the tree (BlockSize + link sizes) - CumulativeSize int -} - -// ChangeType denotes type of change in ObjectChange -type ChangeType int - -const ( - // DiffAdd is set when a link was added to the graph - DiffAdd ChangeType = iota - - // DiffRemove is set when a link was removed from the graph - DiffRemove - - // DiffMod is set when a link was changed in the graph - DiffMod -) - -// ObjectChange represents a change ia a graph -type ObjectChange struct { - // Type of the change, either: - // * DiffAdd - Added a link - // * DiffRemove - Removed a link - // * DiffMod - Modified a link - Type ChangeType - - // Path to the changed link - Path string - - // Before holds the link path before the change. Note that when a link is - // added, this will be nil. - Before path.Resolved - - // After holds the link path after the change. Note that when a link is - // removed, this will be nil. - After path.Resolved -} - -// ObjectAPI specifies the interface to MerkleDAG and contains useful utilities -// for manipulating MerkleDAG data structures. -type ObjectAPI interface { - // New creates new, empty (by default) dag-node. - New(context.Context, ...options.ObjectNewOption) (ipld.Node, error) - - // Put imports the data into merkledag - Put(context.Context, io.Reader, ...options.ObjectPutOption) (path.Resolved, error) - - // Get returns the node for the path - Get(context.Context, path.Path) (ipld.Node, error) - - // Data returns reader for data of the node - Data(context.Context, path.Path) (io.Reader, error) - - // Links returns lint or links the node contains - Links(context.Context, path.Path) ([]*ipld.Link, error) - - // Stat returns information about the node - Stat(context.Context, path.Path) (*ObjectStat, error) - - // AddLink adds a link under the specified path. child path can point to a - // subdirectory within the patent which must be present (can be overridden - // with WithCreate option). 
- AddLink(ctx context.Context, base path.Path, name string, child path.Path, opts ...options.ObjectAddLinkOption) (path.Resolved, error) - - // RmLink removes a link from the node - RmLink(ctx context.Context, base path.Path, link string) (path.Resolved, error) - - // AppendData appends data to the node - AppendData(context.Context, path.Path, io.Reader) (path.Resolved, error) - - // SetData sets the data contained in the node - SetData(context.Context, path.Path, io.Reader) (path.Resolved, error) - - // Diff returns a set of changes needed to transform the first object into the - // second. - Diff(context.Context, path.Path, path.Path) ([]ObjectChange, error) -} diff --git a/coreiface/options/block.go b/coreiface/options/block.go deleted file mode 100644 index 83a43702c..000000000 --- a/coreiface/options/block.go +++ /dev/null @@ -1,165 +0,0 @@ -package options - -import ( - "fmt" - - cid "github.com/ipfs/go-cid" - mc "github.com/multiformats/go-multicodec" - mh "github.com/multiformats/go-multihash" -) - -type BlockPutSettings struct { - CidPrefix cid.Prefix - Pin bool -} - -type BlockRmSettings struct { - Force bool -} - -type ( - BlockPutOption func(*BlockPutSettings) error - BlockRmOption func(*BlockRmSettings) error -) - -func BlockPutOptions(opts ...BlockPutOption) (*BlockPutSettings, error) { - var cidPrefix cid.Prefix - - // Baseline is CIDv1 raw sha2-255-32 (can be tweaked later via opts) - cidPrefix.Version = 1 - cidPrefix.Codec = uint64(mc.Raw) - cidPrefix.MhType = mh.SHA2_256 - cidPrefix.MhLength = -1 // -1 means len is to be calculated during mh.Sum() - - options := &BlockPutSettings{ - CidPrefix: cidPrefix, - Pin: false, - } - - // Apply any overrides - for _, opt := range opts { - err := opt(options) - if err != nil { - return nil, err - } - } - - return options, nil -} - -func BlockRmOptions(opts ...BlockRmOption) (*BlockRmSettings, error) { - options := &BlockRmSettings{ - Force: false, - } - - for _, opt := range opts { - err := opt(options) - if err != nil { - return nil, err - } - } - return options, nil -} - -type blockOpts struct{} - -var Block blockOpts - -// CidCodec is the modern option for Block.Put which specifies the multicodec to use -// in the CID returned by the Block.Put operation. -// It uses correct codes from go-multicodec and replaces the old Format now with CIDv1 as the default. -func (blockOpts) CidCodec(codecName string) BlockPutOption { - return func(settings *BlockPutSettings) error { - if codecName == "" { - return nil - } - code, err := codeFromName(codecName) - if err != nil { - return err - } - settings.CidPrefix.Codec = uint64(code) - return nil - } -} - -// Map string to code from go-multicodec -func codeFromName(codecName string) (mc.Code, error) { - var cidCodec mc.Code - err := cidCodec.Set(codecName) - return cidCodec, err -} - -// Format is a legacy option for Block.Put which specifies the multicodec to -// use to serialize the object. -// Provided for backward-compatibility only. Use CidCodec instead. 
-func (blockOpts) Format(format string) BlockPutOption { - return func(settings *BlockPutSettings) error { - if format == "" { - return nil - } - // Opt-in CIDv0 support for backward-compatibility - if format == "v0" { - settings.CidPrefix.Version = 0 - } - - // Fixup a legacy (invalid) names for dag-pb (0x70) - if format == "v0" || format == "protobuf" { - format = "dag-pb" - } - - // Fixup invalid name for dag-cbor (0x71) - if format == "cbor" { - format = "dag-cbor" - } - - // Set code based on name passed as "format" - code, err := codeFromName(format) - if err != nil { - return err - } - settings.CidPrefix.Codec = uint64(code) - - // If CIDv0, ensure all parameters are compatible - // (in theory go-cid would validate this anyway, but we want to provide better errors) - pref := settings.CidPrefix - if pref.Version == 0 { - if pref.Codec != uint64(mc.DagPb) { - return fmt.Errorf("only dag-pb is allowed with CIDv0") - } - if pref.MhType != mh.SHA2_256 || (pref.MhLength != -1 && pref.MhLength != 32) { - return fmt.Errorf("only sha2-255-32 is allowed with CIDv0") - } - } - - return nil - } -} - -// Hash is an option for Block.Put which specifies the multihash settings to use -// when hashing the object. Default is mh.SHA2_256 (0x12). -// If mhLen is set to -1, default length for the hash will be used -func (blockOpts) Hash(mhType uint64, mhLen int) BlockPutOption { - return func(settings *BlockPutSettings) error { - settings.CidPrefix.MhType = mhType - settings.CidPrefix.MhLength = mhLen - return nil - } -} - -// Pin is an option for Block.Put which specifies whether to (recursively) pin -// added blocks -func (blockOpts) Pin(pin bool) BlockPutOption { - return func(settings *BlockPutSettings) error { - settings.Pin = pin - return nil - } -} - -// Force is an option for Block.Rm which, when set to true, will ignore -// non-existing blocks -func (blockOpts) Force(force bool) BlockRmOption { - return func(settings *BlockRmSettings) error { - settings.Force = force - return nil - } -} diff --git a/coreiface/options/dht.go b/coreiface/options/dht.go deleted file mode 100644 index b43bf3e7a..000000000 --- a/coreiface/options/dht.go +++ /dev/null @@ -1,64 +0,0 @@ -package options - -type DhtProvideSettings struct { - Recursive bool -} - -type DhtFindProvidersSettings struct { - NumProviders int -} - -type ( - DhtProvideOption func(*DhtProvideSettings) error - DhtFindProvidersOption func(*DhtFindProvidersSettings) error -) - -func DhtProvideOptions(opts ...DhtProvideOption) (*DhtProvideSettings, error) { - options := &DhtProvideSettings{ - Recursive: false, - } - - for _, opt := range opts { - err := opt(options) - if err != nil { - return nil, err - } - } - return options, nil -} - -func DhtFindProvidersOptions(opts ...DhtFindProvidersOption) (*DhtFindProvidersSettings, error) { - options := &DhtFindProvidersSettings{ - NumProviders: 20, - } - - for _, opt := range opts { - err := opt(options) - if err != nil { - return nil, err - } - } - return options, nil -} - -type dhtOpts struct{} - -var Dht dhtOpts - -// Recursive is an option for Dht.Provide which specifies whether to provide -// the given path recursively -func (dhtOpts) Recursive(recursive bool) DhtProvideOption { - return func(settings *DhtProvideSettings) error { - settings.Recursive = recursive - return nil - } -} - -// NumProviders is an option for Dht.FindProviders which specifies the -// number of peers to look for. 
Default is 20 -func (dhtOpts) NumProviders(numProviders int) DhtFindProvidersOption { - return func(settings *DhtFindProvidersSettings) error { - settings.NumProviders = numProviders - return nil - } -} diff --git a/coreiface/options/global.go b/coreiface/options/global.go deleted file mode 100644 index 90e2586f1..000000000 --- a/coreiface/options/global.go +++ /dev/null @@ -1,47 +0,0 @@ -package options - -type ApiSettings struct { - Offline bool - FetchBlocks bool -} - -type ApiOption func(*ApiSettings) error - -func ApiOptions(opts ...ApiOption) (*ApiSettings, error) { - options := &ApiSettings{ - Offline: false, - FetchBlocks: true, - } - - return ApiOptionsTo(options, opts...) -} - -func ApiOptionsTo(options *ApiSettings, opts ...ApiOption) (*ApiSettings, error) { - for _, opt := range opts { - err := opt(options) - if err != nil { - return nil, err - } - } - return options, nil -} - -type apiOpts struct{} - -var Api apiOpts - -func (apiOpts) Offline(offline bool) ApiOption { - return func(settings *ApiSettings) error { - settings.Offline = offline - return nil - } -} - -// FetchBlocks when set to false prevents api from fetching blocks from the -// network while allowing other services such as IPNS to still be online -func (apiOpts) FetchBlocks(fetch bool) ApiOption { - return func(settings *ApiSettings) error { - settings.FetchBlocks = fetch - return nil - } -} diff --git a/coreiface/options/key.go b/coreiface/options/key.go deleted file mode 100644 index ebff6d5a7..000000000 --- a/coreiface/options/key.go +++ /dev/null @@ -1,89 +0,0 @@ -package options - -const ( - RSAKey = "rsa" - Ed25519Key = "ed25519" - - DefaultRSALen = 2048 -) - -type KeyGenerateSettings struct { - Algorithm string - Size int -} - -type KeyRenameSettings struct { - Force bool -} - -type ( - KeyGenerateOption func(*KeyGenerateSettings) error - KeyRenameOption func(*KeyRenameSettings) error -) - -func KeyGenerateOptions(opts ...KeyGenerateOption) (*KeyGenerateSettings, error) { - options := &KeyGenerateSettings{ - Algorithm: RSAKey, - Size: -1, - } - - for _, opt := range opts { - err := opt(options) - if err != nil { - return nil, err - } - } - return options, nil -} - -func KeyRenameOptions(opts ...KeyRenameOption) (*KeyRenameSettings, error) { - options := &KeyRenameSettings{ - Force: false, - } - - for _, opt := range opts { - err := opt(options) - if err != nil { - return nil, err - } - } - return options, nil -} - -type keyOpts struct{} - -var Key keyOpts - -// Type is an option for Key.Generate which specifies which algorithm -// should be used for the key. Default is options.RSAKey -// -// Supported key types: -// * options.RSAKey -// * options.Ed25519Key -func (keyOpts) Type(algorithm string) KeyGenerateOption { - return func(settings *KeyGenerateSettings) error { - settings.Algorithm = algorithm - return nil - } -} - -// Size is an option for Key.Generate which specifies the size of the key to -// generated. Default is -1 -// -// value of -1 means 'use default size for key type': -// - 2048 for RSA -func (keyOpts) Size(size int) KeyGenerateOption { - return func(settings *KeyGenerateSettings) error { - settings.Size = size - return nil - } -} - -// Force is an option for Key.Rename which specifies whether to allow to -// replace existing keys. 
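A further sketch, illustrative only: the key options compile the same way, and `Size: -1` is read by consumers as "use the default size for the chosen algorithm", for example `DefaultRSALen` for RSA. Assuming the pre-removal import path:

```go
package main

import (
	"fmt"

	"github.com/ipfs/boxo/coreiface/options"
)

func main() {
	// Generate-time settings for an ed25519 key; Size stays at -1, which the
	// consumer interprets as "default size for this algorithm".
	settings, err := options.KeyGenerateOptions(
		options.Key.Type(options.Ed25519Key),
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(settings.Algorithm, settings.Size) // ed25519 -1
}
```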
-func (keyOpts) Force(force bool) KeyRenameOption { - return func(settings *KeyRenameSettings) error { - settings.Force = force - return nil - } -} diff --git a/coreiface/options/name.go b/coreiface/options/name.go deleted file mode 100644 index 35e78c394..000000000 --- a/coreiface/options/name.go +++ /dev/null @@ -1,131 +0,0 @@ -package options - -import ( - "time" - - ropts "github.com/ipfs/boxo/coreiface/options/namesys" -) - -const ( - DefaultNameValidTime = 24 * time.Hour -) - -type NamePublishSettings struct { - ValidTime time.Duration - Key string - TTL *time.Duration - CompatibleWithV1 bool - AllowOffline bool -} - -type NameResolveSettings struct { - Cache bool - - ResolveOpts []ropts.ResolveOpt -} - -type ( - NamePublishOption func(*NamePublishSettings) error - NameResolveOption func(*NameResolveSettings) error -) - -func NamePublishOptions(opts ...NamePublishOption) (*NamePublishSettings, error) { - options := &NamePublishSettings{ - ValidTime: DefaultNameValidTime, - Key: "self", - - AllowOffline: false, - } - - for _, opt := range opts { - err := opt(options) - if err != nil { - return nil, err - } - } - - return options, nil -} - -func NameResolveOptions(opts ...NameResolveOption) (*NameResolveSettings, error) { - options := &NameResolveSettings{ - Cache: true, - } - - for _, opt := range opts { - err := opt(options) - if err != nil { - return nil, err - } - } - - return options, nil -} - -type nameOpts struct{} - -var Name nameOpts - -// ValidTime is an option for Name.Publish which specifies for how long the -// entry will remain valid. Default value is 24h -func (nameOpts) ValidTime(validTime time.Duration) NamePublishOption { - return func(settings *NamePublishSettings) error { - settings.ValidTime = validTime - return nil - } -} - -// Key is an option for Name.Publish which specifies the key to use for -// publishing. Default value is "self" which is the node's own PeerID. -// The key parameter must be either PeerID or keystore key alias. -// -// You can use KeyAPI to list and generate more names and their respective keys. -func (nameOpts) Key(key string) NamePublishOption { - return func(settings *NamePublishSettings) error { - settings.Key = key - return nil - } -} - -// AllowOffline is an option for Name.Publish which specifies whether to allow -// publishing when the node is offline. Default value is false -func (nameOpts) AllowOffline(allow bool) NamePublishOption { - return func(settings *NamePublishSettings) error { - settings.AllowOffline = allow - return nil - } -} - -// TTL is an option for Name.Publish which specifies the time duration the -// published record should be cached for (caution: experimental). -func (nameOpts) TTL(ttl time.Duration) NamePublishOption { - return func(settings *NamePublishSettings) error { - settings.TTL = &ttl - return nil - } -} - -// CompatibleWithV1 is an option for [Name.Publish] which specifies if the -// created record should be backwards compatible with V1 IPNS Records. -func (nameOpts) CompatibleWithV1(compatible bool) NamePublishOption { - return func(settings *NamePublishSettings) error { - settings.CompatibleWithV1 = compatible - return nil - } -} - -// Cache is an option for Name.Resolve which specifies if cache should be used. 
-// Default value is true -func (nameOpts) Cache(cache bool) NameResolveOption { - return func(settings *NameResolveSettings) error { - settings.Cache = cache - return nil - } -} - -func (nameOpts) ResolveOption(opt ropts.ResolveOpt) NameResolveOption { - return func(settings *NameResolveSettings) error { - settings.ResolveOpts = append(settings.ResolveOpts, opt) - return nil - } -} diff --git a/coreiface/options/namesys/opts.go b/coreiface/options/namesys/opts.go deleted file mode 100644 index ed568200b..000000000 --- a/coreiface/options/namesys/opts.go +++ /dev/null @@ -1,131 +0,0 @@ -package nsopts - -import ( - "time" -) - -const ( - // DefaultDepthLimit is the default depth limit used by Resolve. - DefaultDepthLimit = 32 - - // UnlimitedDepth allows infinite recursion in Resolve. You - // probably don't want to use this, but it's here if you absolutely - // trust resolution to eventually complete and can't put an upper - // limit on how many steps it will take. - UnlimitedDepth = 0 - - // DefaultIPNSRecordTTL specifies the time that the record can be cached - // before checking if its validity again. - DefaultIPNSRecordTTL = time.Minute - - // DefaultIPNSRecordEOL specifies the time that the network will cache IPNS - // records after being published. Records should be re-published before this - // interval expires. We use the same default expiration as the DHT. - DefaultIPNSRecordEOL = 48 * time.Hour -) - -// ResolveOpts specifies options for resolving an IPNS path -type ResolveOpts struct { - // Recursion depth limit - Depth uint - // The number of IPNS records to retrieve from the DHT - // (the best record is selected from this set) - DhtRecordCount uint - // The amount of time to wait for DHT records to be fetched - // and verified. A zero value indicates that there is no explicit - // timeout (although there is an implicit timeout due to dial - // timeouts within the DHT) - DhtTimeout time.Duration -} - -// DefaultResolveOpts returns the default options for resolving -// an IPNS path -func DefaultResolveOpts() ResolveOpts { - return ResolveOpts{ - Depth: DefaultDepthLimit, - DhtRecordCount: 16, - DhtTimeout: time.Minute, - } -} - -// ResolveOpt is used to set an option -type ResolveOpt func(*ResolveOpts) - -// Depth is the recursion depth limit -func Depth(depth uint) ResolveOpt { - return func(o *ResolveOpts) { - o.Depth = depth - } -} - -// DhtRecordCount is the number of IPNS records to retrieve from the DHT -func DhtRecordCount(count uint) ResolveOpt { - return func(o *ResolveOpts) { - o.DhtRecordCount = count - } -} - -// DhtTimeout is the amount of time to wait for DHT records to be fetched -// and verified. A zero value indicates that there is no explicit timeout -func DhtTimeout(timeout time.Duration) ResolveOpt { - return func(o *ResolveOpts) { - o.DhtTimeout = timeout - } -} - -// ProcessOpts converts an array of ResolveOpt into a ResolveOpts object -func ProcessOpts(opts []ResolveOpt) ResolveOpts { - rsopts := DefaultResolveOpts() - for _, option := range opts { - option(&rsopts) - } - return rsopts -} - -// PublishOptions specifies options for publishing an IPNS record. -type PublishOptions struct { - EOL time.Time - TTL time.Duration - CompatibleWithV1 bool -} - -// DefaultPublishOptions returns the default options for publishing an IPNS record. -func DefaultPublishOptions() PublishOptions { - return PublishOptions{ - EOL: time.Now().Add(DefaultIPNSRecordEOL), - TTL: DefaultIPNSRecordTTL, - } -} - -// PublishOption is used to set an option for PublishOpts. 
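Sketch, not part of the diff: resolvers collapse a slice of `ResolveOpt` onto `DefaultResolveOpts()` (depth 32, 16 DHT records, one-minute DHT timeout) via `ProcessOpts`. Assuming the pre-removal import path for the `nsopts` package:

```go
package main

import (
	"fmt"
	"time"

	nsopts "github.com/ipfs/boxo/coreiface/options/namesys"
)

func main() {
	// Tighten the DHT behaviour while keeping the default recursion depth.
	opts := nsopts.ProcessOpts([]nsopts.ResolveOpt{
		nsopts.DhtRecordCount(4),
		nsopts.DhtTimeout(10 * time.Second),
	})
	fmt.Println(opts.Depth, opts.DhtRecordCount, opts.DhtTimeout) // 32 4 10s
}
```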
-type PublishOption func(*PublishOptions) - -// PublishWithEOL sets an EOL. -func PublishWithEOL(eol time.Time) PublishOption { - return func(o *PublishOptions) { - o.EOL = eol - } -} - -// PublishWithEOL sets a TTL. -func PublishWithTTL(ttl time.Duration) PublishOption { - return func(o *PublishOptions) { - o.TTL = ttl - } -} - -// PublishCompatibleWithV1 sets compatibility with IPNS Records V1. -func PublishCompatibleWithV1(compatible bool) PublishOption { - return func(o *PublishOptions) { - o.CompatibleWithV1 = compatible - } -} - -// ProcessPublishOptions converts an array of PublishOpt into a PublishOpts object. -func ProcessPublishOptions(opts []PublishOption) PublishOptions { - rsopts := DefaultPublishOptions() - for _, option := range opts { - option(&rsopts) - } - return rsopts -} diff --git a/coreiface/options/object.go b/coreiface/options/object.go deleted file mode 100644 index b5625a1d6..000000000 --- a/coreiface/options/object.go +++ /dev/null @@ -1,126 +0,0 @@ -package options - -type ObjectNewSettings struct { - Type string -} - -type ObjectPutSettings struct { - InputEnc string - DataType string - Pin bool -} - -type ObjectAddLinkSettings struct { - Create bool -} - -type ( - ObjectNewOption func(*ObjectNewSettings) error - ObjectPutOption func(*ObjectPutSettings) error - ObjectAddLinkOption func(*ObjectAddLinkSettings) error -) - -func ObjectNewOptions(opts ...ObjectNewOption) (*ObjectNewSettings, error) { - options := &ObjectNewSettings{ - Type: "empty", - } - - for _, opt := range opts { - err := opt(options) - if err != nil { - return nil, err - } - } - return options, nil -} - -func ObjectPutOptions(opts ...ObjectPutOption) (*ObjectPutSettings, error) { - options := &ObjectPutSettings{ - InputEnc: "json", - DataType: "text", - Pin: false, - } - - for _, opt := range opts { - err := opt(options) - if err != nil { - return nil, err - } - } - return options, nil -} - -func ObjectAddLinkOptions(opts ...ObjectAddLinkOption) (*ObjectAddLinkSettings, error) { - options := &ObjectAddLinkSettings{ - Create: false, - } - - for _, opt := range opts { - err := opt(options) - if err != nil { - return nil, err - } - } - return options, nil -} - -type objectOpts struct{} - -var Object objectOpts - -// Type is an option for Object.New which allows to change the type of created -// dag node. -// -// Supported types: -// * 'empty' - Empty node -// * 'unixfs-dir' - Empty UnixFS directory -func (objectOpts) Type(t string) ObjectNewOption { - return func(settings *ObjectNewSettings) error { - settings.Type = t - return nil - } -} - -// InputEnc is an option for Object.Put which specifies the input encoding of the -// data. Default is "json". -// -// Supported encodings: -// * "protobuf" -// * "json" -func (objectOpts) InputEnc(e string) ObjectPutOption { - return func(settings *ObjectPutSettings) error { - settings.InputEnc = e - return nil - } -} - -// DataType is an option for Object.Put which specifies the encoding of data -// field when using Json or XML input encoding. 
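A short sketch (illustrative only) of the legacy Object.Put options, using the input-encoding and data-type knobs this file defines and assuming the pre-removal import path:

```go
package main

import (
	"fmt"

	"github.com/ipfs/boxo/coreiface/options"
)

func main() {
	// Legacy Object.Put settings: JSON input whose data field is base64-encoded.
	settings, err := options.ObjectPutOptions(
		options.Object.InputEnc("json"),
		options.Object.DataType("base64"),
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(settings.InputEnc, settings.DataType, settings.Pin)
}
```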
-// -// Supported types: -// * "text" (default) -// * "base64" -func (objectOpts) DataType(t string) ObjectPutOption { - return func(settings *ObjectPutSettings) error { - settings.DataType = t - return nil - } -} - -// Pin is an option for Object.Put which specifies whether to pin the added -// objects, default is false -func (objectOpts) Pin(pin bool) ObjectPutOption { - return func(settings *ObjectPutSettings) error { - settings.Pin = pin - return nil - } -} - -// Create is an option for Object.AddLink which specifies whether create required -// directories for the child -func (objectOpts) Create(create bool) ObjectAddLinkOption { - return func(settings *ObjectAddLinkSettings) error { - settings.Create = create - return nil - } -} diff --git a/coreiface/options/pin.go b/coreiface/options/pin.go deleted file mode 100644 index 75c2b8a26..000000000 --- a/coreiface/options/pin.go +++ /dev/null @@ -1,283 +0,0 @@ -package options - -import "fmt" - -// PinAddSettings represent the settings for PinAPI.Add -type PinAddSettings struct { - Recursive bool -} - -// PinLsSettings represent the settings for PinAPI.Ls -type PinLsSettings struct { - Type string -} - -// PinIsPinnedSettings represent the settings for PinAPI.IsPinned -type PinIsPinnedSettings struct { - WithType string -} - -// PinRmSettings represents the settings for PinAPI.Rm -type PinRmSettings struct { - Recursive bool -} - -// PinUpdateSettings represent the settings for PinAPI.Update -type PinUpdateSettings struct { - Unpin bool -} - -// PinAddOption is the signature of an option for PinAPI.Add -type PinAddOption func(*PinAddSettings) error - -// PinLsOption is the signature of an option for PinAPI.Ls -type PinLsOption func(*PinLsSettings) error - -// PinIsPinnedOption is the signature of an option for PinAPI.IsPinned -type PinIsPinnedOption func(*PinIsPinnedSettings) error - -// PinRmOption is the signature of an option for PinAPI.Rm -type PinRmOption func(*PinRmSettings) error - -// PinUpdateOption is the signature of an option for PinAPI.Update -type PinUpdateOption func(*PinUpdateSettings) error - -// PinAddOptions compile a series of PinAddOption into a ready to use -// PinAddSettings and set the default values. -func PinAddOptions(opts ...PinAddOption) (*PinAddSettings, error) { - options := &PinAddSettings{ - Recursive: true, - } - - for _, opt := range opts { - err := opt(options) - if err != nil { - return nil, err - } - } - - return options, nil -} - -// PinLsOptions compile a series of PinLsOption into a ready to use -// PinLsSettings and set the default values. -func PinLsOptions(opts ...PinLsOption) (*PinLsSettings, error) { - options := &PinLsSettings{ - Type: "all", - } - - for _, opt := range opts { - err := opt(options) - if err != nil { - return nil, err - } - } - - return options, nil -} - -// PinIsPinnedOptions compile a series of PinIsPinnedOption into a ready to use -// PinIsPinnedSettings and set the default values. -func PinIsPinnedOptions(opts ...PinIsPinnedOption) (*PinIsPinnedSettings, error) { - options := &PinIsPinnedSettings{ - WithType: "all", - } - - for _, opt := range opts { - err := opt(options) - if err != nil { - return nil, err - } - } - - return options, nil -} - -// PinRmOptions compile a series of PinRmOption into a ready to use -// PinRmSettings and set the default values. 
-func PinRmOptions(opts ...PinRmOption) (*PinRmSettings, error) { - options := &PinRmSettings{ - Recursive: true, - } - - for _, opt := range opts { - if err := opt(options); err != nil { - return nil, err - } - } - - return options, nil -} - -// PinUpdateOptions compile a series of PinUpdateOption into a ready to use -// PinUpdateSettings and set the default values. -func PinUpdateOptions(opts ...PinUpdateOption) (*PinUpdateSettings, error) { - options := &PinUpdateSettings{ - Unpin: true, - } - - for _, opt := range opts { - err := opt(options) - if err != nil { - return nil, err - } - } - - return options, nil -} - -type pinOpts struct { - Ls pinLsOpts - IsPinned pinIsPinnedOpts -} - -// Pin provide an access to all the options for the Pin API. -var Pin pinOpts - -type pinLsOpts struct{} - -// All is an option for Pin.Ls which will make it return all pins. It is -// the default -func (pinLsOpts) All() PinLsOption { - return Pin.Ls.pinType("all") -} - -// Recursive is an option for Pin.Ls which will make it only return recursive -// pins -func (pinLsOpts) Recursive() PinLsOption { - return Pin.Ls.pinType("recursive") -} - -// Direct is an option for Pin.Ls which will make it only return direct (non -// recursive) pins -func (pinLsOpts) Direct() PinLsOption { - return Pin.Ls.pinType("direct") -} - -// Indirect is an option for Pin.Ls which will make it only return indirect pins -// (objects referenced by other recursively pinned objects) -func (pinLsOpts) Indirect() PinLsOption { - return Pin.Ls.pinType("indirect") -} - -// Type is an option for Pin.Ls which will make it only return pins of the given -// type. -// -// Supported values: -// - "direct" - directly pinned objects -// - "recursive" - roots of recursive pins -// - "indirect" - indirectly pinned objects (referenced by recursively pinned -// objects) -// - "all" - all pinned objects (default) -func (pinLsOpts) Type(typeStr string) (PinLsOption, error) { - switch typeStr { - case "all", "direct", "indirect", "recursive": - return Pin.Ls.pinType(typeStr), nil - default: - return nil, fmt.Errorf("invalid type '%s', must be one of {direct, indirect, recursive, all}", typeStr) - } -} - -// pinType is an option for Pin.Ls which allows to specify which pin types should -// be returned -// -// Supported values: -// - "direct" - directly pinned objects -// - "recursive" - roots of recursive pins -// - "indirect" - indirectly pinned objects (referenced by recursively pinned -// objects) -// - "all" - all pinned objects (default) -func (pinLsOpts) pinType(t string) PinLsOption { - return func(settings *PinLsSettings) error { - settings.Type = t - return nil - } -} - -type pinIsPinnedOpts struct{} - -// All is an option for Pin.IsPinned which will make it search in all type of pins. 
-// It is the default -func (pinIsPinnedOpts) All() PinIsPinnedOption { - return Pin.IsPinned.pinType("all") -} - -// Recursive is an option for Pin.IsPinned which will make it only search in -// recursive pins -func (pinIsPinnedOpts) Recursive() PinIsPinnedOption { - return Pin.IsPinned.pinType("recursive") -} - -// Direct is an option for Pin.IsPinned which will make it only search in direct -// (non recursive) pins -func (pinIsPinnedOpts) Direct() PinIsPinnedOption { - return Pin.IsPinned.pinType("direct") -} - -// Indirect is an option for Pin.IsPinned which will make it only search indirect -// pins (objects referenced by other recursively pinned objects) -func (pinIsPinnedOpts) Indirect() PinIsPinnedOption { - return Pin.IsPinned.pinType("indirect") -} - -// Type is an option for Pin.IsPinned which will make it only search pins of the given -// type. -// -// Supported values: -// - "direct" - directly pinned objects -// - "recursive" - roots of recursive pins -// - "indirect" - indirectly pinned objects (referenced by recursively pinned -// objects) -// - "all" - all pinned objects (default) -func (pinIsPinnedOpts) Type(typeStr string) (PinIsPinnedOption, error) { - switch typeStr { - case "all", "direct", "indirect", "recursive": - return Pin.IsPinned.pinType(typeStr), nil - default: - return nil, fmt.Errorf("invalid type '%s', must be one of {direct, indirect, recursive, all}", typeStr) - } -} - -// pinType is an option for Pin.IsPinned which allows to specify which pin type the given -// pin is expected to be, speeding up the research. -// -// Supported values: -// - "direct" - directly pinned objects -// - "recursive" - roots of recursive pins -// - "indirect" - indirectly pinned objects (referenced by recursively pinned -// objects) -// - "all" - all pinned objects (default) -func (pinIsPinnedOpts) pinType(t string) PinIsPinnedOption { - return func(settings *PinIsPinnedSettings) error { - settings.WithType = t - return nil - } -} - -// Recursive is an option for Pin.Add which specifies whether to pin an entire -// object tree or just one object. Default: true -func (pinOpts) Recursive(recursive bool) PinAddOption { - return func(settings *PinAddSettings) error { - settings.Recursive = recursive - return nil - } -} - -// RmRecursive is an option for Pin.Rm which specifies whether to recursively -// unpin the object linked to by the specified object(s). This does not remove -// indirect pins referenced by other recursive pins. -func (pinOpts) RmRecursive(recursive bool) PinRmOption { - return func(settings *PinRmSettings) error { - settings.Recursive = recursive - return nil - } -} - -// Unpin is an option for Pin.Update which specifies whether to remove the old pin. -// Default is true. 
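Sketch, not part of the diff: the `Pin` helpers group listing and matching options under `Pin.Ls` and `Pin.IsPinned`, while add/rm/update options hang off `Pin` directly. Assuming the pre-removal import path:

```go
package main

import (
	"fmt"

	"github.com/ipfs/boxo/coreiface/options"
)

func main() {
	// List only recursive pins.
	lsSettings, err := options.PinLsOptions(options.Pin.Ls.Recursive())
	if err != nil {
		panic(err)
	}

	// Add a direct (non-recursive) pin.
	addSettings, err := options.PinAddOptions(options.Pin.Recursive(false))
	if err != nil {
		panic(err)
	}

	fmt.Println(lsSettings.Type, addSettings.Recursive) // recursive false
}
```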
-func (pinOpts) Unpin(unpin bool) PinUpdateOption { - return func(settings *PinUpdateSettings) error { - settings.Unpin = unpin - return nil - } -} diff --git a/coreiface/options/pubsub.go b/coreiface/options/pubsub.go deleted file mode 100644 index 839ef97b1..000000000 --- a/coreiface/options/pubsub.go +++ /dev/null @@ -1,60 +0,0 @@ -package options - -type PubSubPeersSettings struct { - Topic string -} - -type PubSubSubscribeSettings struct { - Discover bool -} - -type ( - PubSubPeersOption func(*PubSubPeersSettings) error - PubSubSubscribeOption func(*PubSubSubscribeSettings) error -) - -func PubSubPeersOptions(opts ...PubSubPeersOption) (*PubSubPeersSettings, error) { - options := &PubSubPeersSettings{ - Topic: "", - } - - for _, opt := range opts { - err := opt(options) - if err != nil { - return nil, err - } - } - return options, nil -} - -func PubSubSubscribeOptions(opts ...PubSubSubscribeOption) (*PubSubSubscribeSettings, error) { - options := &PubSubSubscribeSettings{ - Discover: false, - } - - for _, opt := range opts { - err := opt(options) - if err != nil { - return nil, err - } - } - return options, nil -} - -type pubsubOpts struct{} - -var PubSub pubsubOpts - -func (pubsubOpts) Topic(topic string) PubSubPeersOption { - return func(settings *PubSubPeersSettings) error { - settings.Topic = topic - return nil - } -} - -func (pubsubOpts) Discover(discover bool) PubSubSubscribeOption { - return func(settings *PubSubSubscribeSettings) error { - settings.Discover = discover - return nil - } -} diff --git a/coreiface/options/routing.go b/coreiface/options/routing.go deleted file mode 100644 index d66d44a0d..000000000 --- a/coreiface/options/routing.go +++ /dev/null @@ -1,35 +0,0 @@ -package options - -type RoutingPutSettings struct { - AllowOffline bool -} - -type RoutingPutOption func(*RoutingPutSettings) error - -func RoutingPutOptions(opts ...RoutingPutOption) (*RoutingPutSettings, error) { - options := &RoutingPutSettings{ - AllowOffline: false, - } - - for _, opt := range opts { - err := opt(options) - if err != nil { - return nil, err - } - } - - return options, nil -} - -type putOpts struct{} - -var Put putOpts - -// AllowOffline is an option for Routing.Put which specifies whether to allow -// publishing when the node is offline. 
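A brief sketch (illustrative only): `RoutingPutSettings` carries a single switch, the offline-publishing flag described here. Assuming the pre-removal import path:

```go
package main

import (
	"fmt"

	"github.com/ipfs/boxo/coreiface/options"
)

func main() {
	// Allow a routing Put even when the node has no network connectivity.
	settings, err := options.RoutingPutOptions(options.Put.AllowOffline(true))
	if err != nil {
		panic(err)
	}
	fmt.Println(settings.AllowOffline) // true
}
```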
Default value is false -func (putOpts) AllowOffline(allow bool) RoutingPutOption { - return func(settings *RoutingPutSettings) error { - settings.AllowOffline = allow - return nil - } -} diff --git a/coreiface/options/unixfs.go b/coreiface/options/unixfs.go deleted file mode 100644 index f00fffb87..000000000 --- a/coreiface/options/unixfs.go +++ /dev/null @@ -1,295 +0,0 @@ -package options - -import ( - "errors" - "fmt" - - dag "github.com/ipfs/boxo/ipld/merkledag" - cid "github.com/ipfs/go-cid" - mh "github.com/multiformats/go-multihash" -) - -type Layout int - -const ( - BalancedLayout Layout = iota - TrickleLayout -) - -type UnixfsAddSettings struct { - CidVersion int - MhType uint64 - - Inline bool - InlineLimit int - RawLeaves bool - RawLeavesSet bool - - Chunker string - Layout Layout - - Pin bool - OnlyHash bool - FsCache bool - NoCopy bool - - Events chan<- interface{} - Silent bool - Progress bool -} - -type UnixfsLsSettings struct { - ResolveChildren bool - UseCumulativeSize bool -} - -type ( - UnixfsAddOption func(*UnixfsAddSettings) error - UnixfsLsOption func(*UnixfsLsSettings) error -) - -func UnixfsAddOptions(opts ...UnixfsAddOption) (*UnixfsAddSettings, cid.Prefix, error) { - options := &UnixfsAddSettings{ - CidVersion: -1, - MhType: mh.SHA2_256, - - Inline: false, - InlineLimit: 32, - RawLeaves: false, - RawLeavesSet: false, - - Chunker: "size-262144", - Layout: BalancedLayout, - - Pin: false, - OnlyHash: false, - FsCache: false, - NoCopy: false, - - Events: nil, - Silent: false, - Progress: false, - } - - for _, opt := range opts { - err := opt(options) - if err != nil { - return nil, cid.Prefix{}, err - } - } - - // nocopy -> rawblocks - if options.NoCopy && !options.RawLeaves { - // fixed? - if options.RawLeavesSet { - return nil, cid.Prefix{}, fmt.Errorf("nocopy option requires '--raw-leaves' to be enabled as well") - } - - // No, satisfy mandatory constraint. - options.RawLeaves = true - } - - // (hash != "sha2-256") -> CIDv1 - if options.MhType != mh.SHA2_256 { - switch options.CidVersion { - case 0: - return nil, cid.Prefix{}, errors.New("CIDv0 only supports sha2-256") - case 1, -1: - options.CidVersion = 1 - default: - return nil, cid.Prefix{}, fmt.Errorf("unknown CID version: %d", options.CidVersion) - } - } else { - if options.CidVersion < 0 { - // Default to CIDv0 - options.CidVersion = 0 - } - } - - // cidV1 -> raw blocks (by default) - if options.CidVersion > 0 && !options.RawLeavesSet { - options.RawLeaves = true - } - - prefix, err := dag.PrefixForCidVersion(options.CidVersion) - if err != nil { - return nil, cid.Prefix{}, err - } - - prefix.MhType = options.MhType - prefix.MhLength = -1 - - return options, prefix, nil -} - -func UnixfsLsOptions(opts ...UnixfsLsOption) (*UnixfsLsSettings, error) { - options := &UnixfsLsSettings{ - ResolveChildren: true, - } - - for _, opt := range opts { - err := opt(options) - if err != nil { - return nil, err - } - } - - return options, nil -} - -type unixfsOpts struct{} - -var Unixfs unixfsOpts - -// CidVersion specifies which CID version to use. Defaults to 0 unless an option -// that depends on CIDv1 is passed. -func (unixfsOpts) CidVersion(version int) UnixfsAddOption { - return func(settings *UnixfsAddSettings) error { - settings.CidVersion = version - return nil - } -} - -// Hash function to use. Implies CIDv1 if not set to sha2-256 (default). 
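Sketch, not part of the diff, showing the normalization performed by `UnixfsAddOptions`: choosing a hash other than sha2-256 forces CIDv1, and CIDv1 in turn defaults to raw leaves unless they were set explicitly. Assuming the pre-removal import path:

```go
package main

import (
	"fmt"

	"github.com/ipfs/boxo/coreiface/options"
	mh "github.com/multiformats/go-multihash"
)

func main() {
	// A non-sha2-256 hash implies CIDv1, and CIDv1 defaults to raw leaves
	// because RawLeaves was not set explicitly here.
	settings, prefix, err := options.UnixfsAddOptions(
		options.Unixfs.Hash(mh.SHA3_256),
		options.Unixfs.Pin(true),
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(settings.CidVersion, settings.RawLeaves, prefix.MhType) // 1 true ...
}
```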
-// -// Table of functions is declared in https://github.com/multiformats/go-multihash/blob/master/multihash.go -func (unixfsOpts) Hash(mhtype uint64) UnixfsAddOption { - return func(settings *UnixfsAddSettings) error { - settings.MhType = mhtype - return nil - } -} - -// RawLeaves specifies whether to use raw blocks for leaves (data nodes with no -// links) instead of wrapping them with unixfs structures. -func (unixfsOpts) RawLeaves(enable bool) UnixfsAddOption { - return func(settings *UnixfsAddSettings) error { - settings.RawLeaves = enable - settings.RawLeavesSet = true - return nil - } -} - -// Inline tells the adder to inline small blocks into CIDs -func (unixfsOpts) Inline(enable bool) UnixfsAddOption { - return func(settings *UnixfsAddSettings) error { - settings.Inline = enable - return nil - } -} - -// InlineLimit sets the amount of bytes below which blocks will be encoded -// directly into CID instead of being stored and addressed by it's hash. -// Specifying this option won't enable block inlining. For that use `Inline` -// option. Default: 32 bytes -// -// Note that while there is no hard limit on the number of bytes, it should be -// kept at a reasonably low value, such as 64; implementations may choose to -// reject anything larger. -func (unixfsOpts) InlineLimit(limit int) UnixfsAddOption { - return func(settings *UnixfsAddSettings) error { - settings.InlineLimit = limit - return nil - } -} - -// Chunker specifies settings for the chunking algorithm to use. -// -// Default: size-262144, formats: -// size-[bytes] - Simple chunker splitting data into blocks of n bytes -// rabin-[min]-[avg]-[max] - Rabin chunker -func (unixfsOpts) Chunker(chunker string) UnixfsAddOption { - return func(settings *UnixfsAddSettings) error { - settings.Chunker = chunker - return nil - } -} - -// Layout tells the adder how to balance data between leaves. -// options.BalancedLayout is the default, it's optimized for static seekable -// files. -// options.TrickleLayout is optimized for streaming data, -func (unixfsOpts) Layout(layout Layout) UnixfsAddOption { - return func(settings *UnixfsAddSettings) error { - settings.Layout = layout - return nil - } -} - -// Pin tells the adder to pin the file root recursively after adding -func (unixfsOpts) Pin(pin bool) UnixfsAddOption { - return func(settings *UnixfsAddSettings) error { - settings.Pin = pin - return nil - } -} - -// HashOnly will make the adder calculate data hash without storing it in the -// blockstore or announcing it to the network -func (unixfsOpts) HashOnly(hashOnly bool) UnixfsAddOption { - return func(settings *UnixfsAddSettings) error { - settings.OnlyHash = hashOnly - return nil - } -} - -// Events specifies channel which will be used to report events about ongoing -// Add operation. 
-// -// Note that if this channel blocks it may slowdown the adder -func (unixfsOpts) Events(sink chan<- interface{}) UnixfsAddOption { - return func(settings *UnixfsAddSettings) error { - settings.Events = sink - return nil - } -} - -// Silent reduces event output -func (unixfsOpts) Silent(silent bool) UnixfsAddOption { - return func(settings *UnixfsAddSettings) error { - settings.Silent = silent - return nil - } -} - -// Progress tells the adder whether to enable progress events -func (unixfsOpts) Progress(enable bool) UnixfsAddOption { - return func(settings *UnixfsAddSettings) error { - settings.Progress = enable - return nil - } -} - -// FsCache tells the adder to check the filestore for pre-existing blocks -// -// Experimental -func (unixfsOpts) FsCache(enable bool) UnixfsAddOption { - return func(settings *UnixfsAddSettings) error { - settings.FsCache = enable - return nil - } -} - -// NoCopy tells the adder to add the files using filestore. Implies RawLeaves. -// -// Experimental -func (unixfsOpts) Nocopy(enable bool) UnixfsAddOption { - return func(settings *UnixfsAddSettings) error { - settings.NoCopy = enable - return nil - } -} - -func (unixfsOpts) ResolveChildren(resolve bool) UnixfsLsOption { - return func(settings *UnixfsLsSettings) error { - settings.ResolveChildren = resolve - return nil - } -} - -func (unixfsOpts) UseCumulativeSize(use bool) UnixfsLsOption { - return func(settings *UnixfsLsSettings) error { - settings.UseCumulativeSize = use - return nil - } -} diff --git a/coreiface/path/path.go b/coreiface/path/path.go deleted file mode 100644 index c26b8692b..000000000 --- a/coreiface/path/path.go +++ /dev/null @@ -1,199 +0,0 @@ -package path - -import ( - "strings" - - ipfspath "github.com/ipfs/boxo/path" - cid "github.com/ipfs/go-cid" -) - -// Path is a generic wrapper for paths used in the API. A path can be resolved -// to a CID using one of Resolve functions in the API. -// -// Paths must be prefixed with a valid prefix: -// -// * /ipfs - Immutable unixfs path (files) -// * /ipld - Immutable ipld path (data) -// * /ipns - Mutable names. Usually resolves to one of the immutable paths -// TODO: /local (MFS) -type Path interface { - // String returns the path as a string. - String() string - - // Namespace returns the first component of the path. - // - // For example path "/ipfs/QmHash", calling Namespace() will return "ipfs" - // - // Calling this method on invalid paths (IsValid() != nil) will result in - // empty string - Namespace() string - - // Mutable returns false if the data pointed to by this path in guaranteed - // to not change. - // - // Note that resolved mutable path can be immutable. - Mutable() bool - - // IsValid checks if this path is a valid ipfs Path, returning nil iff it is - // valid - IsValid() error -} - -// Resolved is a path which was resolved to the last resolvable node. -// ResolvedPaths are guaranteed to return nil from `IsValid` -type Resolved interface { - // Cid returns the CID of the node referenced by the path. Remainder of the - // path is guaranteed to be within the node. 
- // - // Examples: - // If you have 3 linked objects: QmRoot -> A -> B: - // - // cidB := {"foo": {"bar": 42 }} - // cidA := {"B": {"/": cidB }} - // cidRoot := {"A": {"/": cidA }} - // - // And resolve paths: - // - // * "/ipfs/${cidRoot}" - // * Calling Cid() will return `cidRoot` - // * Calling Root() will return `cidRoot` - // * Calling Remainder() will return `` - // - // * "/ipfs/${cidRoot}/A" - // * Calling Cid() will return `cidA` - // * Calling Root() will return `cidRoot` - // * Calling Remainder() will return `` - // - // * "/ipfs/${cidRoot}/A/B/foo" - // * Calling Cid() will return `cidB` - // * Calling Root() will return `cidRoot` - // * Calling Remainder() will return `foo` - // - // * "/ipfs/${cidRoot}/A/B/foo/bar" - // * Calling Cid() will return `cidB` - // * Calling Root() will return `cidRoot` - // * Calling Remainder() will return `foo/bar` - Cid() cid.Cid - - // Root returns the CID of the root object of the path - // - // Example: - // If you have 3 linked objects: QmRoot -> A -> B, and resolve path - // "/ipfs/QmRoot/A/B", the Root method will return the CID of object QmRoot - // - // For more examples see the documentation of Cid() method - Root() cid.Cid - - // Remainder returns unresolved part of the path - // - // Example: - // If you have 2 linked objects: QmRoot -> A, where A is a CBOR node - // containing the following data: - // - // {"foo": {"bar": 42 }} - // - // When resolving "/ipld/QmRoot/A/foo/bar", Remainder will return "foo/bar" - // - // For more examples see the documentation of Cid() method - Remainder() string - - Path -} - -// path implements coreiface.Path -type path struct { - path string -} - -// resolvedPath implements coreiface.resolvedPath -type resolvedPath struct { - path - cid cid.Cid - root cid.Cid - remainder string -} - -// Join appends provided segments to the base path -func Join(base Path, a ...string) Path { - s := strings.Join(append([]string{base.String()}, a...), "/") - return &path{path: s} -} - -// IpfsPath creates new /ipfs path from the provided CID -func IpfsPath(c cid.Cid) Resolved { - return &resolvedPath{ - path: path{"/ipfs/" + c.String()}, - cid: c, - root: c, - remainder: "", - } -} - -// IpldPath creates new /ipld path from the provided CID -func IpldPath(c cid.Cid) Resolved { - return &resolvedPath{ - path: path{"/ipld/" + c.String()}, - cid: c, - root: c, - remainder: "", - } -} - -// New parses string path to a Path -func New(p string) Path { - if pp, err := ipfspath.ParsePath(p); err == nil { - p = pp.String() - } - - return &path{path: p} -} - -// NewResolvedPath creates new Resolved path. This function performs no checks -// and is intended to be used by resolver implementations. Incorrect inputs may -// cause panics. Handle with care. 
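Sketch, not part of the diff: the removed `coreiface/path` package separates plain `Path` values from `Resolved` ones, as the Cid/Root/Remainder examples above describe. This reuses the dag-cbor CID that appears in the test fixtures later in this diff:

```go
package main

import (
	"fmt"

	path "github.com/ipfs/boxo/coreiface/path"
	cid "github.com/ipfs/go-cid"
)

func main() {
	c, err := cid.Decode("bafyreicnga62zhxnmnlt6ymq5hcbsg7gdhqdu6z4ehu3wpjhvqnflfy6nm")
	if err != nil {
		panic(err)
	}

	// IpfsPath gives an already-resolved /ipfs path: Cid == Root, empty remainder.
	resolved := path.IpfsPath(c)
	fmt.Println(resolved.String(), resolved.Namespace(), resolved.Mutable())
	fmt.Println(resolved.Cid() == resolved.Root(), resolved.Remainder() == "")

	// Join only manipulates the string form; the result still needs resolving.
	p := path.Join(resolved, "some", "link")
	fmt.Println(p.String(), p.IsValid() == nil)
}
```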
-func NewResolvedPath(ipath ipfspath.Path, c cid.Cid, root cid.Cid, remainder string) Resolved { - return &resolvedPath{ - path: path{ipath.String()}, - cid: c, - root: root, - remainder: remainder, - } -} - -func (p *path) String() string { - return p.path -} - -func (p *path) Namespace() string { - ip, err := ipfspath.ParsePath(p.path) - if err != nil { - return "" - } - - if len(ip.Segments()) < 1 { - panic("path without namespace") // this shouldn't happen under any scenario - } - return ip.Segments()[0] -} - -func (p *path) Mutable() bool { - // TODO: MFS: check for /local - return p.Namespace() == "ipns" -} - -func (p *path) IsValid() error { - _, err := ipfspath.ParsePath(p.path) - return err -} - -func (p *resolvedPath) Cid() cid.Cid { - return p.cid -} - -func (p *resolvedPath) Root() cid.Cid { - return p.root -} - -func (p *resolvedPath) Remainder() string { - return p.remainder -} diff --git a/coreiface/pin.go b/coreiface/pin.go deleted file mode 100644 index 6b97c6ca5..000000000 --- a/coreiface/pin.go +++ /dev/null @@ -1,66 +0,0 @@ -package iface - -import ( - "context" - - path "github.com/ipfs/boxo/coreiface/path" - - "github.com/ipfs/boxo/coreiface/options" -) - -// Pin holds information about pinned resource -type Pin interface { - // Path to the pinned object - Path() path.Resolved - - // Type of the pin - Type() string - - // if not nil, an error happened. Everything else should be ignored. - Err() error -} - -// PinStatus holds information about pin health -type PinStatus interface { - // Ok indicates whether the pin has been verified to be correct - Ok() bool - - // BadNodes returns any bad (usually missing) nodes from the pin - BadNodes() []BadPinNode - - // if not nil, an error happened. Everything else should be ignored. - Err() error -} - -// BadPinNode is a node that has been marked as bad by Pin.Verify -type BadPinNode interface { - // Path is the path of the node - Path() path.Resolved - - // Err is the reason why the node has been marked as bad - Err() error -} - -// PinAPI specifies the interface to pining -type PinAPI interface { - // Add creates new pin, be default recursive - pinning the whole referenced - // tree - Add(context.Context, path.Path, ...options.PinAddOption) error - - // Ls returns list of pinned objects on this node - Ls(context.Context, ...options.PinLsOption) (<-chan Pin, error) - - // IsPinned returns whether or not the given cid is pinned - // and an explanation of why its pinned - IsPinned(context.Context, path.Path, ...options.PinIsPinnedOption) (string, bool, error) - - // Rm removes pin for object specified by the path - Rm(context.Context, path.Path, ...options.PinRmOption) error - - // Update changes one pin to another, skipping checks for matching paths in - // the old tree - Update(ctx context.Context, from path.Path, to path.Path, opts ...options.PinUpdateOption) error - - // Verify verifies the integrity of pinned objects - Verify(context.Context) (<-chan PinStatus, error) -} diff --git a/coreiface/pubsub.go b/coreiface/pubsub.go deleted file mode 100644 index bbd1da4ec..000000000 --- a/coreiface/pubsub.go +++ /dev/null @@ -1,48 +0,0 @@ -package iface - -import ( - "context" - "io" - - "github.com/ipfs/boxo/coreiface/options" - - "github.com/libp2p/go-libp2p/core/peer" -) - -// PubSubSubscription is an active PubSub subscription -type PubSubSubscription interface { - io.Closer - - // Next return the next incoming message - Next(context.Context) (PubSubMessage, error) -} - -// PubSubMessage is a single PubSub message -type 
PubSubMessage interface { - // From returns id of a peer from which the message has arrived - From() peer.ID - - // Data returns the message body - Data() []byte - - // Seq returns message identifier - Seq() []byte - - // Topics returns list of topics this message was set to - Topics() []string -} - -// PubSubAPI specifies the interface to PubSub -type PubSubAPI interface { - // Ls lists subscribed topics by name - Ls(context.Context) ([]string, error) - - // Peers list peers we are currently pubsubbing with - Peers(context.Context, ...options.PubSubPeersOption) ([]peer.ID, error) - - // Publish a message to a given pubsub topic - Publish(context.Context, string, []byte) error - - // Subscribe to messages on a given topic - Subscribe(context.Context, string, ...options.PubSubSubscribeOption) (PubSubSubscription, error) -} diff --git a/coreiface/routing.go b/coreiface/routing.go deleted file mode 100644 index 5099c3de0..000000000 --- a/coreiface/routing.go +++ /dev/null @@ -1,16 +0,0 @@ -package iface - -import ( - "context" - - "github.com/ipfs/boxo/coreiface/options" -) - -// RoutingAPI specifies the interface to the routing layer. -type RoutingAPI interface { - // Get retrieves the best value for a given key - Get(context.Context, string) ([]byte, error) - - // Put sets a value for a given key - Put(ctx context.Context, key string, value []byte, opts ...options.RoutingPutOption) error -} diff --git a/coreiface/swarm.go b/coreiface/swarm.go deleted file mode 100644 index 9aa5466ba..000000000 --- a/coreiface/swarm.go +++ /dev/null @@ -1,57 +0,0 @@ -package iface - -import ( - "context" - "errors" - "time" - - "github.com/libp2p/go-libp2p/core/network" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/core/protocol" - - ma "github.com/multiformats/go-multiaddr" -) - -var ( - ErrNotConnected = errors.New("not connected") - ErrConnNotFound = errors.New("conn not found") -) - -// ConnectionInfo contains information about a peer -type ConnectionInfo interface { - // ID returns PeerID - ID() peer.ID - - // Address returns the multiaddress via which we are connected with the peer - Address() ma.Multiaddr - - // Direction returns which way the connection was established - Direction() network.Direction - - // Latency returns last known round trip time to the peer - Latency() (time.Duration, error) - - // Streams returns list of streams established with the peer - Streams() ([]protocol.ID, error) -} - -// SwarmAPI specifies the interface to libp2p swarm -type SwarmAPI interface { - // Connect to a given peer - Connect(context.Context, peer.AddrInfo) error - - // Disconnect from a given address - Disconnect(context.Context, ma.Multiaddr) error - - // Peers returns the list of peers we are connected to - Peers(context.Context) ([]ConnectionInfo, error) - - // KnownAddrs returns the list of all addresses this node is aware of - KnownAddrs(context.Context) (map[peer.ID][]ma.Multiaddr, error) - - // LocalAddrs returns the list of announced listening addresses - LocalAddrs(context.Context) ([]ma.Multiaddr, error) - - // ListenAddrs returns the list of all listening addresses - ListenAddrs(context.Context) ([]ma.Multiaddr, error) -} diff --git a/coreiface/tests/api.go b/coreiface/tests/api.go deleted file mode 100644 index a66e2abeb..000000000 --- a/coreiface/tests/api.go +++ /dev/null @@ -1,110 +0,0 @@ -package tests - -import ( - "context" - "errors" - "testing" - "time" - - coreiface "github.com/ipfs/boxo/coreiface" -) - -var errAPINotImplemented = errors.New("api not 
implemented") - -type Provider interface { - // Make creates n nodes. fullIdentity set to false can be ignored - MakeAPISwarm(t *testing.T, ctx context.Context, fullIdentity bool, online bool, n int) ([]coreiface.CoreAPI, error) -} - -func (tp *TestSuite) makeAPISwarm(t *testing.T, ctx context.Context, fullIdentity bool, online bool, n int) ([]coreiface.CoreAPI, error) { - if tp.apis != nil { - tp.apis <- 1 - go func() { - <-ctx.Done() - tp.apis <- -1 - }() - } - - return tp.Provider.MakeAPISwarm(t, ctx, fullIdentity, online, n) -} - -func (tp *TestSuite) makeAPI(t *testing.T, ctx context.Context) (coreiface.CoreAPI, error) { - api, err := tp.makeAPISwarm(t, ctx, false, false, 1) - if err != nil { - return nil, err - } - - return api[0], nil -} - -func (tp *TestSuite) makeAPIWithIdentityAndOffline(t *testing.T, ctx context.Context) (coreiface.CoreAPI, error) { - api, err := tp.makeAPISwarm(t, ctx, true, false, 1) - if err != nil { - return nil, err - } - - return api[0], nil -} - -func (tp *TestSuite) MakeAPISwarm(t *testing.T, ctx context.Context, n int) ([]coreiface.CoreAPI, error) { - return tp.makeAPISwarm(t, ctx, true, true, n) -} - -type TestSuite struct { - Provider - - apis chan int -} - -func TestApi(p Provider) func(t *testing.T) { - running := 1 - apis := make(chan int) - zeroRunning := make(chan struct{}) - go func() { - for i := range apis { - running += i - if running < 1 { - close(zeroRunning) - return - } - } - }() - - tp := &TestSuite{Provider: p, apis: apis} - - return func(t *testing.T) { - t.Run("Block", tp.TestBlock) - t.Run("Dag", tp.TestDag) - t.Run("Dht", tp.TestDht) - t.Run("Key", tp.TestKey) - t.Run("Name", tp.TestName) - t.Run("Object", tp.TestObject) - t.Run("Path", tp.TestPath) - t.Run("Pin", tp.TestPin) - t.Run("PubSub", tp.TestPubSub) - t.Run("Routing", tp.TestRouting) - t.Run("Unixfs", tp.TestUnixfs) - - apis <- -1 - t.Run("TestsCancelCtx", func(t *testing.T) { - select { - case <-zeroRunning: - case <-time.After(time.Second): - t.Errorf("%d test swarms(s) not closed", running) - } - }) - } -} - -func (tp *TestSuite) hasApi(t *testing.T, tf func(coreiface.CoreAPI) error) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - if err := tf(api); err != nil { - t.Fatal(api) - } -} diff --git a/coreiface/tests/block.go b/coreiface/tests/block.go deleted file mode 100644 index 5dcb16e4f..000000000 --- a/coreiface/tests/block.go +++ /dev/null @@ -1,354 +0,0 @@ -package tests - -import ( - "bytes" - "context" - "io" - "strings" - "testing" - - coreiface "github.com/ipfs/boxo/coreiface" - opt "github.com/ipfs/boxo/coreiface/options" - "github.com/ipfs/boxo/coreiface/path" - ipld "github.com/ipfs/go-ipld-format" - - mh "github.com/multiformats/go-multihash" -) - -var ( - pbCidV0 = "QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN" // dag-pb - pbCid = "bafybeiffndsajwhk3lwjewwdxqntmjm4b5wxaaanokonsggenkbw6slwk4" // dag-pb - rawCid = "bafkreiffndsajwhk3lwjewwdxqntmjm4b5wxaaanokonsggenkbw6slwk4" // raw bytes - cborCid = "bafyreicnga62zhxnmnlt6ymq5hcbsg7gdhqdu6z4ehu3wpjhvqnflfy6nm" // dag-cbor - cborKCid = "bafyr2qgsohbwdlk7ajmmbb4lhoytmest4wdbe5xnexfvtxeatuyqqmwv3fgxp3pmhpc27gwey2cct56gloqefoqwcf3yqiqzsaqb7p4jefhcw" // dag-cbor keccak-512 -) - -// dag-pb -func pbBlock() io.Reader { - return bytes.NewReader([]byte{10, 12, 8, 2, 18, 6, 104, 101, 108, 108, 111, 10, 24, 6}) -} - -// dag-cbor -func cborBlock() io.Reader { - return bytes.NewReader([]byte{101, 72, 101, 108, 108, 111}) 
-} - -func (tp *TestSuite) TestBlock(t *testing.T) { - tp.hasApi(t, func(api coreiface.CoreAPI) error { - if api.Block() == nil { - return errAPINotImplemented - } - return nil - }) - - t.Run("TestBlockPut (get raw CIDv1)", tp.TestBlockPut) - t.Run("TestBlockPutCidCodec: dag-pb", tp.TestBlockPutCidCodecDagPb) - t.Run("TestBlockPutCidCodec: dag-cbor", tp.TestBlockPutCidCodecDagCbor) - t.Run("TestBlockPutFormat (legacy): cbor → dag-cbor", tp.TestBlockPutFormatDagCbor) - t.Run("TestBlockPutFormat (legacy): protobuf → dag-pb", tp.TestBlockPutFormatDagPb) - t.Run("TestBlockPutFormat (legacy): v0 → CIDv0", tp.TestBlockPutFormatV0) - t.Run("TestBlockPutHash", tp.TestBlockPutHash) - t.Run("TestBlockGet", tp.TestBlockGet) - t.Run("TestBlockRm", tp.TestBlockRm) - t.Run("TestBlockStat", tp.TestBlockStat) - t.Run("TestBlockPin", tp.TestBlockPin) -} - -// when no opts are passed, produced CID has 'raw' codec -func (tp *TestSuite) TestBlockPut(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - res, err := api.Block().Put(ctx, pbBlock()) - if err != nil { - t.Fatal(err) - } - - if res.Path().Cid().String() != rawCid { - t.Errorf("got wrong cid: %s", res.Path().Cid().String()) - } -} - -// Format is deprecated, it used invalid codec names. -// Confirm 'cbor' gets fixed to 'dag-cbor' -func (tp *TestSuite) TestBlockPutFormatDagCbor(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - res, err := api.Block().Put(ctx, cborBlock(), opt.Block.Format("cbor")) - if err != nil { - t.Fatal(err) - } - - if res.Path().Cid().String() != cborCid { - t.Errorf("got wrong cid: %s", res.Path().Cid().String()) - } -} - -// Format is deprecated, it used invalid codec names. -// Confirm 'protobuf' got fixed to 'dag-pb' -func (tp *TestSuite) TestBlockPutFormatDagPb(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - res, err := api.Block().Put(ctx, pbBlock(), opt.Block.Format("protobuf")) - if err != nil { - t.Fatal(err) - } - - if res.Path().Cid().String() != pbCid { - t.Errorf("got wrong cid: %s", res.Path().Cid().String()) - } -} - -// Format is deprecated, it used invalid codec names. 
-// Confirm fake codec 'v0' got fixed to CIDv0 (with implicit dag-pb codec) -func (tp *TestSuite) TestBlockPutFormatV0(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - res, err := api.Block().Put(ctx, pbBlock(), opt.Block.Format("v0")) - if err != nil { - t.Fatal(err) - } - - if res.Path().Cid().String() != pbCidV0 { - t.Errorf("got wrong cid: %s", res.Path().Cid().String()) - } -} - -func (tp *TestSuite) TestBlockPutCidCodecDagCbor(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - res, err := api.Block().Put(ctx, cborBlock(), opt.Block.CidCodec("dag-cbor")) - if err != nil { - t.Fatal(err) - } - - if res.Path().Cid().String() != cborCid { - t.Errorf("got wrong cid: %s", res.Path().Cid().String()) - } -} - -func (tp *TestSuite) TestBlockPutCidCodecDagPb(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - res, err := api.Block().Put(ctx, pbBlock(), opt.Block.CidCodec("dag-pb")) - if err != nil { - t.Fatal(err) - } - - if res.Path().Cid().String() != pbCid { - t.Errorf("got wrong cid: %s", res.Path().Cid().String()) - } -} - -func (tp *TestSuite) TestBlockPutHash(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - res, err := api.Block().Put( - ctx, - cborBlock(), - opt.Block.Hash(mh.KECCAK_512, -1), - opt.Block.CidCodec("dag-cbor"), - ) - if err != nil { - t.Fatal(err) - } - - if res.Path().Cid().String() != cborKCid { - t.Errorf("got wrong cid: %s", res.Path().Cid().String()) - } -} - -func (tp *TestSuite) TestBlockGet(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - res, err := api.Block().Put(ctx, strings.NewReader(`Hello`), opt.Block.Format("raw")) - if err != nil { - t.Fatal(err) - } - - r, err := api.Block().Get(ctx, res.Path()) - if err != nil { - t.Fatal(err) - } - - d, err := io.ReadAll(r) - if err != nil { - t.Fatal(err) - } - - if string(d) != "Hello" { - t.Error("didn't get correct data back") - } - - p := path.New("/ipfs/" + res.Path().Cid().String()) - - rp, err := api.ResolvePath(ctx, p) - if err != nil { - t.Fatal(err) - } - if rp.Cid().String() != res.Path().Cid().String() { - t.Error("paths didn't match") - } -} - -func (tp *TestSuite) TestBlockRm(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - res, err := api.Block().Put(ctx, strings.NewReader(`Hello`), opt.Block.Format("raw")) - if err != nil { - t.Fatal(err) - } - - r, err := api.Block().Get(ctx, res.Path()) - if err != nil { - t.Fatal(err) - } - - d, err := io.ReadAll(r) - if err != nil { - t.Fatal(err) - } - - if string(d) != "Hello" { - t.Error("didn't get correct data back") - } - - err = api.Block().Rm(ctx, res.Path()) - if err != nil { - t.Fatal(err) - } - - _, err = api.Block().Get(ctx, res.Path()) - if err == nil { - t.Fatal("expected err to exist") - } - if !ipld.IsNotFound(err) { - t.Errorf("unexpected error; %s", err.Error()) - } - - err = api.Block().Rm(ctx, res.Path()) - if err == nil { - t.Fatal("expected err to exist") - } - if 
!ipld.IsNotFound(err) { - t.Errorf("unexpected error; %s", err.Error()) - } - - err = api.Block().Rm(ctx, res.Path(), opt.Block.Force(true)) - if err != nil { - t.Fatal(err) - } -} - -func (tp *TestSuite) TestBlockStat(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - res, err := api.Block().Put(ctx, strings.NewReader(`Hello`), opt.Block.Format("raw")) - if err != nil { - t.Fatal(err) - } - - stat, err := api.Block().Stat(ctx, res.Path()) - if err != nil { - t.Fatal(err) - } - - if stat.Path().String() != res.Path().String() { - t.Error("paths don't match") - } - - if stat.Size() != len("Hello") { - t.Error("length doesn't match") - } -} - -func (tp *TestSuite) TestBlockPin(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - _, err = api.Block().Put(ctx, strings.NewReader(`Hello`), opt.Block.Format("raw")) - if err != nil { - t.Fatal(err) - } - - if pins, err := api.Pin().Ls(ctx); err != nil || len(pins) != 0 { - t.Fatal("expected 0 pins") - } - - res, err := api.Block().Put( - ctx, - strings.NewReader(`Hello`), - opt.Block.Pin(true), - opt.Block.Format("raw"), - ) - if err != nil { - t.Fatal(err) - } - - pins, err := accPins(api.Pin().Ls(ctx)) - if err != nil { - t.Fatal(err) - } - if len(pins) != 1 { - t.Fatal("expected 1 pin") - } - if pins[0].Type() != "recursive" { - t.Error("expected a recursive pin") - } - if pins[0].Path().String() != res.Path().String() { - t.Error("pin path didn't match") - } -} diff --git a/coreiface/tests/dag.go b/coreiface/tests/dag.go deleted file mode 100644 index b9a03c8f4..000000000 --- a/coreiface/tests/dag.go +++ /dev/null @@ -1,198 +0,0 @@ -package tests - -import ( - "context" - "math" - gopath "path" - "strings" - "testing" - - path "github.com/ipfs/boxo/coreiface/path" - - coreiface "github.com/ipfs/boxo/coreiface" - - ipldcbor "github.com/ipfs/go-ipld-cbor" - ipld "github.com/ipfs/go-ipld-format" - mh "github.com/multiformats/go-multihash" -) - -func (tp *TestSuite) TestDag(t *testing.T) { - tp.hasApi(t, func(api coreiface.CoreAPI) error { - if api.Dag() == nil { - return errAPINotImplemented - } - return nil - }) - - t.Run("TestPut", tp.TestPut) - t.Run("TestPutWithHash", tp.TestPutWithHash) - t.Run("TestPath", tp.TestDagPath) - t.Run("TestTree", tp.TestTree) - t.Run("TestBatch", tp.TestBatch) -} - -var treeExpected = map[string]struct{}{ - "a": {}, - "b": {}, - "c": {}, - "c/d": {}, - "c/e": {}, -} - -func (tp *TestSuite) TestPut(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - nd, err := ipldcbor.FromJSON(strings.NewReader(`"Hello"`), math.MaxUint64, -1) - if err != nil { - t.Fatal(err) - } - - err = api.Dag().Add(ctx, nd) - if err != nil { - t.Fatal(err) - } - - if nd.Cid().String() != "bafyreicnga62zhxnmnlt6ymq5hcbsg7gdhqdu6z4ehu3wpjhvqnflfy6nm" { - t.Errorf("got wrong cid: %s", nd.Cid().String()) - } -} - -func (tp *TestSuite) TestPutWithHash(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - nd, err := ipldcbor.FromJSON(strings.NewReader(`"Hello"`), mh.SHA3_256, -1) - if err != nil { - t.Fatal(err) - } - - err = api.Dag().Add(ctx, nd) - if err != nil { - t.Fatal(err) - } - - if 
nd.Cid().String() != "bafyrmifu7haikttpqqgc5ewvmp76z3z4ebp7h2ph4memw7dq4nt6btmxny" { - t.Errorf("got wrong cid: %s", nd.Cid().String()) - } -} - -func (tp *TestSuite) TestDagPath(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - snd, err := ipldcbor.FromJSON(strings.NewReader(`"foo"`), math.MaxUint64, -1) - if err != nil { - t.Fatal(err) - } - - err = api.Dag().Add(ctx, snd) - if err != nil { - t.Fatal(err) - } - - nd, err := ipldcbor.FromJSON(strings.NewReader(`{"lnk": {"/": "`+snd.Cid().String()+`"}}`), math.MaxUint64, -1) - if err != nil { - t.Fatal(err) - } - - err = api.Dag().Add(ctx, nd) - if err != nil { - t.Fatal(err) - } - - p := path.New(gopath.Join(nd.Cid().String(), "lnk")) - - rp, err := api.ResolvePath(ctx, p) - if err != nil { - t.Fatal(err) - } - - ndd, err := api.Dag().Get(ctx, rp.Cid()) - if err != nil { - t.Fatal(err) - } - - if ndd.Cid().String() != snd.Cid().String() { - t.Errorf("got unexpected cid %s, expected %s", ndd.Cid().String(), snd.Cid().String()) - } -} - -func (tp *TestSuite) TestTree(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - nd, err := ipldcbor.FromJSON(strings.NewReader(`{"a": 123, "b": "foo", "c": {"d": 321, "e": 111}}`), math.MaxUint64, -1) - if err != nil { - t.Fatal(err) - } - - err = api.Dag().Add(ctx, nd) - if err != nil { - t.Fatal(err) - } - - res, err := api.Dag().Get(ctx, nd.Cid()) - if err != nil { - t.Fatal(err) - } - - lst := res.Tree("", -1) - if len(lst) != len(treeExpected) { - t.Errorf("tree length of %d doesn't match expected %d", len(lst), len(treeExpected)) - } - - for _, ent := range lst { - if _, ok := treeExpected[ent]; !ok { - t.Errorf("unexpected tree entry %s", ent) - } - } -} - -func (tp *TestSuite) TestBatch(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - nd, err := ipldcbor.FromJSON(strings.NewReader(`"Hello"`), math.MaxUint64, -1) - if err != nil { - t.Fatal(err) - } - - if nd.Cid().String() != "bafyreicnga62zhxnmnlt6ymq5hcbsg7gdhqdu6z4ehu3wpjhvqnflfy6nm" { - t.Errorf("got wrong cid: %s", nd.Cid().String()) - } - - _, err = api.Dag().Get(ctx, nd.Cid()) - if err == nil || !strings.Contains(err.Error(), "not found") { - t.Fatal(err) - } - - if err := api.Dag().AddMany(ctx, []ipld.Node{nd}); err != nil { - t.Fatal(err) - } - - _, err = api.Dag().Get(ctx, nd.Cid()) - if err != nil { - t.Fatal(err) - } -} diff --git a/coreiface/tests/dht.go b/coreiface/tests/dht.go deleted file mode 100644 index 3b3ac1d61..000000000 --- a/coreiface/tests/dht.go +++ /dev/null @@ -1,166 +0,0 @@ -package tests - -import ( - "context" - "io" - "testing" - "time" - - iface "github.com/ipfs/boxo/coreiface" - "github.com/ipfs/boxo/coreiface/options" -) - -func (tp *TestSuite) TestDht(t *testing.T) { - tp.hasApi(t, func(api iface.CoreAPI) error { - if api.Dht() == nil { - return errAPINotImplemented - } - return nil - }) - - t.Run("TestDhtFindPeer", tp.TestDhtFindPeer) - t.Run("TestDhtFindProviders", tp.TestDhtFindProviders) - t.Run("TestDhtProvide", tp.TestDhtProvide) -} - -func (tp *TestSuite) TestDhtFindPeer(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - apis, err := tp.MakeAPISwarm(t, ctx, 5) - if err != nil { - t.Fatal(err) - } - - self0, err := 
apis[0].Key().Self(ctx) - if err != nil { - t.Fatal(err) - } - - laddrs0, err := apis[0].Swarm().LocalAddrs(ctx) - if err != nil { - t.Fatal(err) - } - if len(laddrs0) != 1 { - t.Fatal("unexpected number of local addrs") - } - - time.Sleep(3 * time.Second) - - pi, err := apis[2].Dht().FindPeer(ctx, self0.ID()) - if err != nil { - t.Fatal(err) - } - - if pi.Addrs[0].String() != laddrs0[0].String() { - t.Errorf("got unexpected address from FindPeer: %s", pi.Addrs[0].String()) - } - - self2, err := apis[2].Key().Self(ctx) - if err != nil { - t.Fatal(err) - } - - pi, err = apis[1].Dht().FindPeer(ctx, self2.ID()) - if err != nil { - t.Fatal(err) - } - - laddrs2, err := apis[2].Swarm().LocalAddrs(ctx) - if err != nil { - t.Fatal(err) - } - if len(laddrs2) != 1 { - t.Fatal("unexpected number of local addrs") - } - - if pi.Addrs[0].String() != laddrs2[0].String() { - t.Errorf("got unexpected address from FindPeer: %s", pi.Addrs[0].String()) - } -} - -func (tp *TestSuite) TestDhtFindProviders(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - apis, err := tp.MakeAPISwarm(t, ctx, 5) - if err != nil { - t.Fatal(err) - } - - p, err := addTestObject(ctx, apis[0]) - if err != nil { - t.Fatal(err) - } - - time.Sleep(3 * time.Second) - - out, err := apis[2].Dht().FindProviders(ctx, p, options.Dht.NumProviders(1)) - if err != nil { - t.Fatal(err) - } - - provider := <-out - - self0, err := apis[0].Key().Self(ctx) - if err != nil { - t.Fatal(err) - } - - if provider.ID.String() != self0.ID().String() { - t.Errorf("got wrong provider: %s != %s", provider.ID.String(), self0.ID().String()) - } -} - -func (tp *TestSuite) TestDhtProvide(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - apis, err := tp.MakeAPISwarm(t, ctx, 5) - if err != nil { - t.Fatal(err) - } - - off0, err := apis[0].WithOptions(options.Api.Offline(true)) - if err != nil { - t.Fatal(err) - } - - s, err := off0.Block().Put(ctx, &io.LimitedReader{R: rnd, N: 4092}) - if err != nil { - t.Fatal(err) - } - - p := s.Path() - - time.Sleep(3 * time.Second) - - out, err := apis[2].Dht().FindProviders(ctx, p, options.Dht.NumProviders(1)) - if err != nil { - t.Fatal(err) - } - - _, ok := <-out - - if ok { - t.Fatal("did not expect to find any providers") - } - - self0, err := apis[0].Key().Self(ctx) - if err != nil { - t.Fatal(err) - } - - err = apis[0].Dht().Provide(ctx, p) - if err != nil { - t.Fatal(err) - } - - out, err = apis[2].Dht().FindProviders(ctx, p, options.Dht.NumProviders(1)) - if err != nil { - t.Fatal(err) - } - - provider := <-out - - if provider.ID.String() != self0.ID().String() { - t.Errorf("got wrong provider: %s != %s", provider.ID.String(), self0.ID().String()) - } -} diff --git a/coreiface/tests/key.go b/coreiface/tests/key.go deleted file mode 100644 index 0b755380e..000000000 --- a/coreiface/tests/key.go +++ /dev/null @@ -1,538 +0,0 @@ -package tests - -import ( - "context" - "strings" - "testing" - - iface "github.com/ipfs/boxo/coreiface" - opt "github.com/ipfs/boxo/coreiface/options" - "github.com/ipfs/go-cid" - mbase "github.com/multiformats/go-multibase" -) - -func (tp *TestSuite) TestKey(t *testing.T) { - tp.hasApi(t, func(api iface.CoreAPI) error { - if api.Key() == nil { - return errAPINotImplemented - } - return nil - }) - - t.Run("TestListSelf", tp.TestListSelf) - t.Run("TestRenameSelf", tp.TestRenameSelf) - t.Run("TestRemoveSelf", tp.TestRemoveSelf) - t.Run("TestGenerate", tp.TestGenerate) - t.Run("TestGenerateSize", 
tp.TestGenerateSize) - t.Run("TestGenerateType", tp.TestGenerateType) - t.Run("TestGenerateExisting", tp.TestGenerateExisting) - t.Run("TestList", tp.TestList) - t.Run("TestRename", tp.TestRename) - t.Run("TestRenameToSelf", tp.TestRenameToSelf) - t.Run("TestRenameToSelfForce", tp.TestRenameToSelfForce) - t.Run("TestRenameOverwriteNoForce", tp.TestRenameOverwriteNoForce) - t.Run("TestRenameOverwrite", tp.TestRenameOverwrite) - t.Run("TestRenameSameNameNoForce", tp.TestRenameSameNameNoForce) - t.Run("TestRenameSameName", tp.TestRenameSameName) - t.Run("TestRemove", tp.TestRemove) -} - -func (tp *TestSuite) TestListSelf(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - return - } - - self, err := api.Key().Self(ctx) - if err != nil { - t.Fatal(err) - } - - keys, err := api.Key().List(ctx) - if err != nil { - t.Fatalf("failed to list keys: %s", err) - return - } - - if len(keys) != 1 { - t.Fatalf("there should be 1 key (self), got %d", len(keys)) - return - } - - if keys[0].Name() != "self" { - t.Errorf("expected the key to be called 'self', got '%s'", keys[0].Name()) - } - - if keys[0].Path().String() != "/ipns/"+iface.FormatKeyID(self.ID()) { - t.Errorf("expected the key to have path '/ipns/%s', got '%s'", iface.FormatKeyID(self.ID()), keys[0].Path().String()) - } -} - -func (tp *TestSuite) TestRenameSelf(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - return - } - - _, _, err = api.Key().Rename(ctx, "self", "foo") - if err == nil { - t.Error("expected error to not be nil") - } else { - if !strings.Contains(err.Error(), "cannot rename key with name 'self'") { - t.Fatalf("expected error 'cannot rename key with name 'self'', got '%s'", err.Error()) - } - } - - _, _, err = api.Key().Rename(ctx, "self", "foo", opt.Key.Force(true)) - if err == nil { - t.Error("expected error to not be nil") - } else { - if !strings.Contains(err.Error(), "cannot rename key with name 'self'") { - t.Fatalf("expected error 'cannot rename key with name 'self'', got '%s'", err.Error()) - } - } -} - -func (tp *TestSuite) TestRemoveSelf(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - return - } - - _, err = api.Key().Remove(ctx, "self") - if err == nil { - t.Error("expected error to not be nil") - } else { - if !strings.Contains(err.Error(), "cannot remove key with name 'self'") { - t.Fatalf("expected error 'cannot remove key with name 'self'', got '%s'", err.Error()) - } - } -} - -func (tp *TestSuite) TestGenerate(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - k, err := api.Key().Generate(ctx, "foo") - if err != nil { - t.Fatal(err) - return - } - - if k.Name() != "foo" { - t.Errorf("expected the key to be called 'foo', got '%s'", k.Name()) - } - - verifyIPNSPath(t, k.Path().String()) -} - -func verifyIPNSPath(t *testing.T, p string) bool { - t.Helper() - if !strings.HasPrefix(p, "/ipns/") { - t.Errorf("path %q does not look like an IPNS path", p) - return false - } - k := p[len("/ipns/"):] - c, err := cid.Decode(k) - if err != nil { - t.Errorf("failed to decode IPNS key %q (%v)", k, err) - return false - } - b36, err := c.StringOfBase(mbase.Base36) - if err != nil { - 
t.Fatalf("cid cannot format itself in b36") - return false - } - if b36 != k { - t.Errorf("IPNS key is not base36") - } - return true -} - -func (tp *TestSuite) TestGenerateSize(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - k, err := api.Key().Generate(ctx, "foo", opt.Key.Size(2048)) - if err != nil { - t.Fatal(err) - return - } - - if k.Name() != "foo" { - t.Errorf("expected the key to be called 'foo', got '%s'", k.Name()) - } - - verifyIPNSPath(t, k.Path().String()) -} - -func (tp *TestSuite) TestGenerateType(t *testing.T) { - t.Skip("disabled until libp2p/specs#111 is fixed") - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - k, err := api.Key().Generate(ctx, "bar", opt.Key.Type(opt.Ed25519Key)) - if err != nil { - t.Fatal(err) - return - } - - if k.Name() != "bar" { - t.Errorf("expected the key to be called 'foo', got '%s'", k.Name()) - } - - // Expected to be an inlined identity hash. - if !strings.HasPrefix(k.Path().String(), "/ipns/12") { - t.Errorf("expected the key to be prefixed with '/ipns/12', got '%s'", k.Path().String()) - } -} - -func (tp *TestSuite) TestGenerateExisting(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - _, err = api.Key().Generate(ctx, "foo") - if err != nil { - t.Fatal(err) - return - } - - _, err = api.Key().Generate(ctx, "foo") - if err == nil { - t.Error("expected error to not be nil") - } else { - if !strings.Contains(err.Error(), "key with name 'foo' already exists") { - t.Fatalf("expected error 'key with name 'foo' already exists', got '%s'", err.Error()) - } - } - - _, err = api.Key().Generate(ctx, "self") - if err == nil { - t.Error("expected error to not be nil") - } else { - if !strings.Contains(err.Error(), "cannot create key with name 'self'") { - t.Fatalf("expected error 'cannot create key with name 'self'', got '%s'", err.Error()) - } - } -} - -func (tp *TestSuite) TestList(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - _, err = api.Key().Generate(ctx, "foo") - if err != nil { - t.Fatal(err) - return - } - - l, err := api.Key().List(ctx) - if err != nil { - t.Fatal(err) - return - } - - if len(l) != 2 { - t.Fatalf("expected to get 2 keys, got %d", len(l)) - return - } - - if l[0].Name() != "self" { - t.Fatalf("expected key 0 to be called 'self', got '%s'", l[0].Name()) - return - } - - if l[1].Name() != "foo" { - t.Fatalf("expected key 1 to be called 'foo', got '%s'", l[1].Name()) - return - } - - verifyIPNSPath(t, l[0].Path().String()) - verifyIPNSPath(t, l[1].Path().String()) -} - -func (tp *TestSuite) TestRename(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - _, err = api.Key().Generate(ctx, "foo") - if err != nil { - t.Fatal(err) - return - } - - k, overwrote, err := api.Key().Rename(ctx, "foo", "bar") - if err != nil { - t.Fatal(err) - return - } - - if overwrote { - t.Error("overwrote should be false") - } - - if k.Name() != "bar" { - t.Errorf("returned key should be called 'bar', got '%s'", k.Name()) - } -} - -func (tp *TestSuite) TestRenameToSelf(t *testing.T) { - ctx, cancel 
:= context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - _, err = api.Key().Generate(ctx, "foo") - if err != nil { - t.Fatal(err) - return - } - - _, _, err = api.Key().Rename(ctx, "foo", "self") - if err == nil { - t.Error("expected error to not be nil") - } else { - if !strings.Contains(err.Error(), "cannot overwrite key with name 'self'") { - t.Fatalf("expected error 'cannot overwrite key with name 'self'', got '%s'", err.Error()) - } - } -} - -func (tp *TestSuite) TestRenameToSelfForce(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - _, err = api.Key().Generate(ctx, "foo") - if err != nil { - t.Fatal(err) - return - } - - _, _, err = api.Key().Rename(ctx, "foo", "self", opt.Key.Force(true)) - if err == nil { - t.Error("expected error to not be nil") - } else { - if !strings.Contains(err.Error(), "cannot overwrite key with name 'self'") { - t.Fatalf("expected error 'cannot overwrite key with name 'self'', got '%s'", err.Error()) - } - } -} - -func (tp *TestSuite) TestRenameOverwriteNoForce(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - _, err = api.Key().Generate(ctx, "foo") - if err != nil { - t.Fatal(err) - return - } - - _, err = api.Key().Generate(ctx, "bar") - if err != nil { - t.Fatal(err) - return - } - - _, _, err = api.Key().Rename(ctx, "foo", "bar") - if err == nil { - t.Error("expected error to not be nil") - } else { - if !strings.Contains(err.Error(), "key by that name already exists, refusing to overwrite") { - t.Fatalf("expected error 'key by that name already exists, refusing to overwrite', got '%s'", err.Error()) - } - } -} - -func (tp *TestSuite) TestRenameOverwrite(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - kfoo, err := api.Key().Generate(ctx, "foo") - if err != nil { - t.Fatal(err) - return - } - - _, err = api.Key().Generate(ctx, "bar") - if err != nil { - t.Fatal(err) - return - } - - k, overwrote, err := api.Key().Rename(ctx, "foo", "bar", opt.Key.Force(true)) - if err != nil { - t.Fatal(err) - return - } - - if !overwrote { - t.Error("overwrote should be true") - } - - if k.Name() != "bar" { - t.Errorf("returned key should be called 'bar', got '%s'", k.Name()) - } - - if k.Path().String() != kfoo.Path().String() { - t.Errorf("k and kfoo should have equal paths, '%s'!='%s'", k.Path().String(), kfoo.Path().String()) - } -} - -func (tp *TestSuite) TestRenameSameNameNoForce(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - _, err = api.Key().Generate(ctx, "foo") - if err != nil { - t.Fatal(err) - return - } - - k, overwrote, err := api.Key().Rename(ctx, "foo", "foo") - if err != nil { - t.Fatal(err) - return - } - - if overwrote { - t.Error("overwrote should be false") - } - - if k.Name() != "foo" { - t.Errorf("returned key should be called 'foo', got '%s'", k.Name()) - } -} - -func (tp *TestSuite) TestRenameSameName(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - _, err = api.Key().Generate(ctx, "foo") - if err 
!= nil { - t.Fatal(err) - return - } - - k, overwrote, err := api.Key().Rename(ctx, "foo", "foo", opt.Key.Force(true)) - if err != nil { - t.Fatal(err) - return - } - - if overwrote { - t.Error("overwrote should be false") - } - - if k.Name() != "foo" { - t.Errorf("returned key should be called 'foo', got '%s'", k.Name()) - } -} - -func (tp *TestSuite) TestRemove(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - k, err := api.Key().Generate(ctx, "foo") - if err != nil { - t.Fatal(err) - return - } - - l, err := api.Key().List(ctx) - if err != nil { - t.Fatal(err) - return - } - - if len(l) != 2 { - t.Fatalf("expected to get 2 keys, got %d", len(l)) - return - } - - p, err := api.Key().Remove(ctx, "foo") - if err != nil { - t.Fatal(err) - return - } - - if k.Path().String() != p.Path().String() { - t.Errorf("k and p should have equal paths, '%s'!='%s'", k.Path().String(), p.Path().String()) - } - - l, err = api.Key().List(ctx) - if err != nil { - t.Fatal(err) - return - } - - if len(l) != 1 { - t.Fatalf("expected to get 1 key, got %d", len(l)) - return - } - - if l[0].Name() != "self" { - t.Errorf("expected the key to be called 'self', got '%s'", l[0].Name()) - } -} diff --git a/coreiface/tests/name.go b/coreiface/tests/name.go deleted file mode 100644 index 74d88edff..000000000 --- a/coreiface/tests/name.go +++ /dev/null @@ -1,167 +0,0 @@ -package tests - -import ( - "context" - "io" - "math/rand" - gopath "path" - "testing" - "time" - - coreiface "github.com/ipfs/boxo/coreiface" - opt "github.com/ipfs/boxo/coreiface/options" - path "github.com/ipfs/boxo/coreiface/path" - "github.com/ipfs/boxo/files" - "github.com/ipfs/boxo/ipns" - "github.com/stretchr/testify/require" -) - -func (tp *TestSuite) TestName(t *testing.T) { - tp.hasApi(t, func(api coreiface.CoreAPI) error { - if api.Name() == nil { - return errAPINotImplemented - } - return nil - }) - - t.Run("TestPublishResolve", tp.TestPublishResolve) - t.Run("TestBasicPublishResolveKey", tp.TestBasicPublishResolveKey) - t.Run("TestBasicPublishResolveTimeout", tp.TestBasicPublishResolveTimeout) -} - -var rnd = rand.New(rand.NewSource(0x62796532303137)) - -func addTestObject(ctx context.Context, api coreiface.CoreAPI) (path.Path, error) { - return api.Unixfs().Add(ctx, files.NewReaderFile(&io.LimitedReader{R: rnd, N: 4092})) -} - -func appendPath(p path.Path, sub string) path.Path { - return path.New(gopath.Join(p.String(), sub)) -} - -func (tp *TestSuite) TestPublishResolve(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - init := func() (coreiface.CoreAPI, path.Path) { - apis, err := tp.MakeAPISwarm(t, ctx, 5) - require.NoError(t, err) - api := apis[0] - - p, err := addTestObject(ctx, api) - require.NoError(t, err) - return api, p - } - run := func(t *testing.T, ropts []opt.NameResolveOption) { - t.Run("basic", func(t *testing.T) { - api, p := init() - name, err := api.Name().Publish(ctx, p) - require.NoError(t, err) - - self, err := api.Key().Self(ctx) - require.NoError(t, err) - require.Equal(t, name.String(), ipns.NameFromPeer(self.ID()).String()) - - resPath, err := api.Name().Resolve(ctx, name.String(), ropts...) 
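// Resolving the freshly published name should round-trip back to the
// originally added /ipfs path; the checks that follow assert both a nil
// error and an exact path match.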
- require.NoError(t, err) - require.Equal(t, p.String(), resPath.String()) - }) - - t.Run("publishPath", func(t *testing.T) { - api, p := init() - name, err := api.Name().Publish(ctx, appendPath(p, "/test")) - require.NoError(t, err) - - self, err := api.Key().Self(ctx) - require.NoError(t, err) - require.Equal(t, name.String(), ipns.NameFromPeer(self.ID()).String()) - - resPath, err := api.Name().Resolve(ctx, name.String(), ropts...) - require.NoError(t, err) - require.Equal(t, p.String()+"/test", resPath.String()) - }) - - t.Run("revolvePath", func(t *testing.T) { - api, p := init() - name, err := api.Name().Publish(ctx, p) - require.NoError(t, err) - - self, err := api.Key().Self(ctx) - require.NoError(t, err) - require.Equal(t, name.String(), ipns.NameFromPeer(self.ID()).String()) - - resPath, err := api.Name().Resolve(ctx, name.String()+"/test", ropts...) - require.NoError(t, err) - require.Equal(t, p.String()+"/test", resPath.String()) - }) - - t.Run("publishRevolvePath", func(t *testing.T) { - api, p := init() - name, err := api.Name().Publish(ctx, appendPath(p, "/a")) - require.NoError(t, err) - - self, err := api.Key().Self(ctx) - require.NoError(t, err) - require.Equal(t, name.String(), ipns.NameFromPeer(self.ID()).String()) - - resPath, err := api.Name().Resolve(ctx, name.String()+"/b", ropts...) - require.NoError(t, err) - require.Equal(t, p.String()+"/a/b", resPath.String()) - }) - } - - t.Run("default", func(t *testing.T) { - run(t, []opt.NameResolveOption{}) - }) - - t.Run("nocache", func(t *testing.T) { - run(t, []opt.NameResolveOption{opt.Name.Cache(false)}) - }) -} - -func (tp *TestSuite) TestBasicPublishResolveKey(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - apis, err := tp.MakeAPISwarm(t, ctx, 5) - require.NoError(t, err) - api := apis[0] - - k, err := api.Key().Generate(ctx, "foo") - require.NoError(t, err) - - p, err := addTestObject(ctx, api) - require.NoError(t, err) - - name, err := api.Name().Publish(ctx, p, opt.Name.Key(k.Name())) - require.NoError(t, err) - require.Equal(t, name.String(), ipns.NameFromPeer(k.ID()).String()) - - resPath, err := api.Name().Resolve(ctx, name.String()) - require.NoError(t, err) - require.Equal(t, p.String(), resPath.String()) -} - -func (tp *TestSuite) TestBasicPublishResolveTimeout(t *testing.T) { - t.Skip("ValidTime doesn't appear to work at this time resolution") - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - apis, err := tp.MakeAPISwarm(t, ctx, 5) - require.NoError(t, err) - api := apis[0] - p, err := addTestObject(ctx, api) - require.NoError(t, err) - - self, err := api.Key().Self(ctx) - require.NoError(t, err) - - name, err := api.Name().Publish(ctx, p, opt.Name.ValidTime(time.Millisecond*100)) - require.NoError(t, err) - require.Equal(t, name.String(), ipns.NameFromPeer(self.ID()).String()) - - time.Sleep(time.Second) - - _, err = api.Name().Resolve(ctx, name.String()) - require.NoError(t, err) -} - -// TODO: When swarm api is created, add multinode tests diff --git a/coreiface/tests/object.go b/coreiface/tests/object.go deleted file mode 100644 index 63c218eb3..000000000 --- a/coreiface/tests/object.go +++ /dev/null @@ -1,467 +0,0 @@ -package tests - -import ( - "bytes" - "context" - "encoding/hex" - "io" - "strings" - "testing" - - iface "github.com/ipfs/boxo/coreiface" - opt "github.com/ipfs/boxo/coreiface/options" -) - -func (tp *TestSuite) TestObject(t *testing.T) { - tp.hasApi(t, func(api iface.CoreAPI) error { - if api.Object() == nil { - 
return errAPINotImplemented - } - return nil - }) - - t.Run("TestNew", tp.TestNew) - t.Run("TestObjectPut", tp.TestObjectPut) - t.Run("TestObjectGet", tp.TestObjectGet) - t.Run("TestObjectData", tp.TestObjectData) - t.Run("TestObjectLinks", tp.TestObjectLinks) - t.Run("TestObjectStat", tp.TestObjectStat) - t.Run("TestObjectAddLink", tp.TestObjectAddLink) - t.Run("TestObjectAddLinkCreate", tp.TestObjectAddLinkCreate) - t.Run("TestObjectRmLink", tp.TestObjectRmLink) - t.Run("TestObjectAddData", tp.TestObjectAddData) - t.Run("TestObjectSetData", tp.TestObjectSetData) - t.Run("TestDiffTest", tp.TestDiffTest) -} - -func (tp *TestSuite) TestNew(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - emptyNode, err := api.Object().New(ctx) - if err != nil { - t.Fatal(err) - } - - dirNode, err := api.Object().New(ctx, opt.Object.Type("unixfs-dir")) - if err != nil { - t.Fatal(err) - } - - if emptyNode.String() != "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n" { - t.Errorf("Unexpected emptyNode path: %s", emptyNode.String()) - } - - if dirNode.String() != "QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn" { - t.Errorf("Unexpected dirNode path: %s", dirNode.String()) - } -} - -func (tp *TestSuite) TestObjectPut(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - p1, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"foo"}`)) - if err != nil { - t.Fatal(err) - } - - p2, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"YmFy"}`), opt.Object.DataType("base64")) // bar - if err != nil { - t.Fatal(err) - } - - pbBytes, err := hex.DecodeString("0a0362617a") - if err != nil { - t.Fatal(err) - } - - p3, err := api.Object().Put(ctx, bytes.NewReader(pbBytes), opt.Object.InputEnc("protobuf")) - if err != nil { - t.Fatal(err) - } - - if p1.String() != "/ipfs/QmQeGyS87nyijii7kFt1zbe4n2PsXTFimzsdxyE9qh9TST" { - t.Errorf("unexpected path: %s", p1.String()) - } - - if p2.String() != "/ipfs/QmNeYRbCibmaMMK6Du6ChfServcLqFvLJF76PzzF76SPrZ" { - t.Errorf("unexpected path: %s", p2.String()) - } - - if p3.String() != "/ipfs/QmZreR7M2t7bFXAdb1V5FtQhjk4t36GnrvueLJowJbQM9m" { - t.Errorf("unexpected path: %s", p3.String()) - } -} - -func (tp *TestSuite) TestObjectGet(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - p1, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"foo"}`)) - if err != nil { - t.Fatal(err) - } - - nd, err := api.Object().Get(ctx, p1) - if err != nil { - t.Fatal(err) - } - - if string(nd.RawData()[len(nd.RawData())-3:]) != "foo" { - t.Fatal("got non-matching data") - } -} - -func (tp *TestSuite) TestObjectData(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - p1, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"foo"}`)) - if err != nil { - t.Fatal(err) - } - - r, err := api.Object().Data(ctx, p1) - if err != nil { - t.Fatal(err) - } - - data, err := io.ReadAll(r) - if err != nil { - t.Fatal(err) - } - - if string(data) != "foo" { - t.Fatal("got non-matching data") - } -} - -func (tp *TestSuite) TestObjectLinks(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - 
if err != nil { - t.Fatal(err) - } - - p1, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"foo"}`)) - if err != nil { - t.Fatal(err) - } - - p2, err := api.Object().Put(ctx, strings.NewReader(`{"Links":[{"Name":"bar", "Hash":"`+p1.Cid().String()+`"}]}`)) - if err != nil { - t.Fatal(err) - } - - links, err := api.Object().Links(ctx, p2) - if err != nil { - t.Fatal(err) - } - - if len(links) != 1 { - t.Errorf("unexpected number of links: %d", len(links)) - } - - if links[0].Cid.String() != p1.Cid().String() { - t.Fatal("cids didn't batch") - } - - if links[0].Name != "bar" { - t.Fatal("unexpected link name") - } -} - -func (tp *TestSuite) TestObjectStat(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - p1, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"foo"}`)) - if err != nil { - t.Fatal(err) - } - - p2, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"bazz", "Links":[{"Name":"bar", "Hash":"`+p1.Cid().String()+`", "Size":3}]}`)) - if err != nil { - t.Fatal(err) - } - - stat, err := api.Object().Stat(ctx, p2) - if err != nil { - t.Fatal(err) - } - - if stat.Cid.String() != p2.Cid().String() { - t.Error("unexpected stat.Cid") - } - - if stat.NumLinks != 1 { - t.Errorf("unexpected stat.NumLinks") - } - - if stat.BlockSize != 51 { - t.Error("unexpected stat.BlockSize") - } - - if stat.LinksSize != 47 { - t.Errorf("unexpected stat.LinksSize: %d", stat.LinksSize) - } - - if stat.DataSize != 4 { - t.Error("unexpected stat.DataSize") - } - - if stat.CumulativeSize != 54 { - t.Error("unexpected stat.DataSize") - } -} - -func (tp *TestSuite) TestObjectAddLink(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - p1, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"foo"}`)) - if err != nil { - t.Fatal(err) - } - - p2, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"bazz", "Links":[{"Name":"bar", "Hash":"`+p1.Cid().String()+`", "Size":3}]}`)) - if err != nil { - t.Fatal(err) - } - - p3, err := api.Object().AddLink(ctx, p2, "abc", p2) - if err != nil { - t.Fatal(err) - } - - links, err := api.Object().Links(ctx, p3) - if err != nil { - t.Fatal(err) - } - - if len(links) != 2 { - t.Errorf("unexpected number of links: %d", len(links)) - } - - if links[0].Name != "abc" { - t.Errorf("unexpected link 0 name: %s", links[0].Name) - } - - if links[1].Name != "bar" { - t.Errorf("unexpected link 1 name: %s", links[1].Name) - } -} - -func (tp *TestSuite) TestObjectAddLinkCreate(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - p1, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"foo"}`)) - if err != nil { - t.Fatal(err) - } - - p2, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"bazz", "Links":[{"Name":"bar", "Hash":"`+p1.Cid().String()+`", "Size":3}]}`)) - if err != nil { - t.Fatal(err) - } - - _, err = api.Object().AddLink(ctx, p2, "abc/d", p2) - if err == nil { - t.Fatal("expected an error") - } - if !strings.Contains(err.Error(), "no link by that name") { - t.Fatalf("unexpected error: %s", err.Error()) - } - - p3, err := api.Object().AddLink(ctx, p2, "abc/d", p2, opt.Object.Create(true)) - if err != nil { - t.Fatal(err) - } - - links, err := api.Object().Links(ctx, p3) - if err != nil { - t.Fatal(err) - } - - if 
len(links) != 2 { - t.Errorf("unexpected number of links: %d", len(links)) - } - - if links[0].Name != "abc" { - t.Errorf("unexpected link 0 name: %s", links[0].Name) - } - - if links[1].Name != "bar" { - t.Errorf("unexpected link 1 name: %s", links[1].Name) - } -} - -func (tp *TestSuite) TestObjectRmLink(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - p1, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"foo"}`)) - if err != nil { - t.Fatal(err) - } - - p2, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"bazz", "Links":[{"Name":"bar", "Hash":"`+p1.Cid().String()+`", "Size":3}]}`)) - if err != nil { - t.Fatal(err) - } - - p3, err := api.Object().RmLink(ctx, p2, "bar") - if err != nil { - t.Fatal(err) - } - - links, err := api.Object().Links(ctx, p3) - if err != nil { - t.Fatal(err) - } - - if len(links) != 0 { - t.Errorf("unexpected number of links: %d", len(links)) - } -} - -func (tp *TestSuite) TestObjectAddData(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - p1, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"foo"}`)) - if err != nil { - t.Fatal(err) - } - - p2, err := api.Object().AppendData(ctx, p1, strings.NewReader("bar")) - if err != nil { - t.Fatal(err) - } - - r, err := api.Object().Data(ctx, p2) - if err != nil { - t.Fatal(err) - } - - data, err := io.ReadAll(r) - if err != nil { - t.Fatal(err) - } - - if string(data) != "foobar" { - t.Error("unexpected data") - } -} - -func (tp *TestSuite) TestObjectSetData(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - p1, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"foo"}`)) - if err != nil { - t.Fatal(err) - } - - p2, err := api.Object().SetData(ctx, p1, strings.NewReader("bar")) - if err != nil { - t.Fatal(err) - } - - r, err := api.Object().Data(ctx, p2) - if err != nil { - t.Fatal(err) - } - - data, err := io.ReadAll(r) - if err != nil { - t.Fatal(err) - } - - if string(data) != "bar" { - t.Error("unexpected data") - } -} - -func (tp *TestSuite) TestDiffTest(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - p1, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"foo"}`)) - if err != nil { - t.Fatal(err) - } - - p2, err := api.Object().Put(ctx, strings.NewReader(`{"Data":"bar"}`)) - if err != nil { - t.Fatal(err) - } - - changes, err := api.Object().Diff(ctx, p1, p2) - if err != nil { - t.Fatal(err) - } - - if len(changes) != 1 { - t.Fatal("unexpected changes len") - } - - if changes[0].Type != iface.DiffMod { - t.Fatal("unexpected change type") - } - - if changes[0].Before.String() != p1.String() { - t.Fatal("unexpected before path") - } - - if changes[0].After.String() != p2.String() { - t.Fatal("unexpected before path") - } -} diff --git a/coreiface/tests/path.go b/coreiface/tests/path.go deleted file mode 100644 index 06f3aa1f8..000000000 --- a/coreiface/tests/path.go +++ /dev/null @@ -1,197 +0,0 @@ -package tests - -import ( - "context" - "math" - "strings" - "testing" - - "github.com/ipfs/boxo/coreiface/path" - - "github.com/ipfs/boxo/coreiface/options" - - ipldcbor "github.com/ipfs/go-ipld-cbor" -) - -func (tp *TestSuite) TestPath(t 
*testing.T) { - t.Run("TestMutablePath", tp.TestMutablePath) - t.Run("TestPathRemainder", tp.TestPathRemainder) - t.Run("TestEmptyPathRemainder", tp.TestEmptyPathRemainder) - t.Run("TestInvalidPathRemainder", tp.TestInvalidPathRemainder) - t.Run("TestPathRoot", tp.TestPathRoot) - t.Run("TestPathJoin", tp.TestPathJoin) -} - -func (tp *TestSuite) TestMutablePath(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - blk, err := api.Block().Put(ctx, strings.NewReader(`foo`)) - if err != nil { - t.Fatal(err) - } - - if blk.Path().Mutable() { - t.Error("expected /ipld path to be immutable") - } - - // get self /ipns path - - if api.Key() == nil { - t.Fatal(".Key not implemented") - } - - keys, err := api.Key().List(ctx) - if err != nil { - t.Fatal(err) - } - - if !keys[0].Path().Mutable() { - t.Error("expected self /ipns path to be mutable") - } -} - -func (tp *TestSuite) TestPathRemainder(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - if api.Dag() == nil { - t.Fatal(".Dag not implemented") - } - - nd, err := ipldcbor.FromJSON(strings.NewReader(`{"foo": {"bar": "baz"}}`), math.MaxUint64, -1) - if err != nil { - t.Fatal(err) - } - - if err := api.Dag().Add(ctx, nd); err != nil { - t.Fatal(err) - } - - rp1, err := api.ResolvePath(ctx, path.New(nd.String()+"/foo/bar")) - if err != nil { - t.Fatal(err) - } - - if rp1.Remainder() != "foo/bar" { - t.Error("expected to get path remainder") - } -} - -func (tp *TestSuite) TestEmptyPathRemainder(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - if api.Dag() == nil { - t.Fatal(".Dag not implemented") - } - - nd, err := ipldcbor.FromJSON(strings.NewReader(`{"foo": {"bar": "baz"}}`), math.MaxUint64, -1) - if err != nil { - t.Fatal(err) - } - - if err := api.Dag().Add(ctx, nd); err != nil { - t.Fatal(err) - } - - rp1, err := api.ResolvePath(ctx, path.New(nd.Cid().String())) - if err != nil { - t.Fatal(err) - } - - if rp1.Remainder() != "" { - t.Error("expected the resolved path to not have a remainder") - } -} - -func (tp *TestSuite) TestInvalidPathRemainder(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - if api.Dag() == nil { - t.Fatal(".Dag not implemented") - } - - nd, err := ipldcbor.FromJSON(strings.NewReader(`{"foo": {"bar": "baz"}}`), math.MaxUint64, -1) - if err != nil { - t.Fatal(err) - } - - if err := api.Dag().Add(ctx, nd); err != nil { - t.Fatal(err) - } - - _, err = api.ResolvePath(ctx, path.New("/ipld/"+nd.Cid().String()+"/bar/baz")) - if err == nil || !strings.Contains(err.Error(), `no link named "bar"`) { - t.Fatalf("unexpected error: %s", err) - } -} - -func (tp *TestSuite) TestPathRoot(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - if api.Block() == nil { - t.Fatal(".Block not implemented") - } - - blk, err := api.Block().Put(ctx, strings.NewReader(`foo`), options.Block.Format("raw")) - if err != nil { - t.Fatal(err) - } - - if api.Dag() == nil { - t.Fatal(".Dag not implemented") - } - - nd, err := ipldcbor.FromJSON(strings.NewReader(`{"foo": {"/": 
"`+blk.Path().Cid().String()+`"}}`), math.MaxUint64, -1) - if err != nil { - t.Fatal(err) - } - - if err := api.Dag().Add(ctx, nd); err != nil { - t.Fatal(err) - } - - rp, err := api.ResolvePath(ctx, path.New("/ipld/"+nd.Cid().String()+"/foo")) - if err != nil { - t.Fatal(err) - } - - if rp.Root().String() != nd.Cid().String() { - t.Error("unexpected path root") - } - - if rp.Cid().String() != blk.Path().Cid().String() { - t.Error("unexpected path cid") - } -} - -func (tp *TestSuite) TestPathJoin(t *testing.T) { - p1 := path.New("/ipfs/QmYNmQKp6SuaVrpgWRsPTgCQCnpxUYGq76YEKBXuj2N4H6/bar/baz") - - if path.Join(p1, "foo").String() != "/ipfs/QmYNmQKp6SuaVrpgWRsPTgCQCnpxUYGq76YEKBXuj2N4H6/bar/baz/foo" { - t.Error("unexpected path") - } -} diff --git a/coreiface/tests/pin.go b/coreiface/tests/pin.go deleted file mode 100644 index 4b0fea01d..000000000 --- a/coreiface/tests/pin.go +++ /dev/null @@ -1,604 +0,0 @@ -package tests - -import ( - "context" - "math" - "strings" - "testing" - - iface "github.com/ipfs/boxo/coreiface" - opt "github.com/ipfs/boxo/coreiface/options" - "github.com/ipfs/boxo/coreiface/path" - - "github.com/ipfs/go-cid" - ipldcbor "github.com/ipfs/go-ipld-cbor" - ipld "github.com/ipfs/go-ipld-format" -) - -func (tp *TestSuite) TestPin(t *testing.T) { - tp.hasApi(t, func(api iface.CoreAPI) error { - if api.Pin() == nil { - return errAPINotImplemented - } - return nil - }) - - t.Run("TestPinAdd", tp.TestPinAdd) - t.Run("TestPinSimple", tp.TestPinSimple) - t.Run("TestPinRecursive", tp.TestPinRecursive) - t.Run("TestPinLsIndirect", tp.TestPinLsIndirect) - t.Run("TestPinLsPrecedence", tp.TestPinLsPrecedence) - t.Run("TestPinIsPinned", tp.TestPinIsPinned) -} - -func (tp *TestSuite) TestPinAdd(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - p, err := api.Unixfs().Add(ctx, strFile("foo")()) - if err != nil { - t.Fatal(err) - } - - err = api.Pin().Add(ctx, p) - if err != nil { - t.Fatal(err) - } -} - -func (tp *TestSuite) TestPinSimple(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - p, err := api.Unixfs().Add(ctx, strFile("foo")()) - if err != nil { - t.Fatal(err) - } - - err = api.Pin().Add(ctx, p) - if err != nil { - t.Fatal(err) - } - - list, err := accPins(api.Pin().Ls(ctx)) - if err != nil { - t.Fatal(err) - } - - if len(list) != 1 { - t.Errorf("unexpected pin list len: %d", len(list)) - } - - if list[0].Path().Cid().String() != p.Cid().String() { - t.Error("paths don't match") - } - - if list[0].Type() != "recursive" { - t.Error("unexpected pin type") - } - - assertIsPinned(t, ctx, api, p, "recursive") - - err = api.Pin().Rm(ctx, p) - if err != nil { - t.Fatal(err) - } - - list, err = accPins(api.Pin().Ls(ctx)) - if err != nil { - t.Fatal(err) - } - - if len(list) != 0 { - t.Errorf("unexpected pin list len: %d", len(list)) - } -} - -func (tp *TestSuite) TestPinRecursive(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - p0, err := api.Unixfs().Add(ctx, strFile("foo")()) - if err != nil { - t.Fatal(err) - } - - p1, err := api.Unixfs().Add(ctx, strFile("bar")()) - if err != nil { - t.Fatal(err) - } - - nd2, err := ipldcbor.FromJSON(strings.NewReader(`{"lnk": {"/": "`+p0.Cid().String()+`"}}`), math.MaxUint64, -1) - if err != nil { - 
t.Fatal(err) - } - - nd3, err := ipldcbor.FromJSON(strings.NewReader(`{"lnk": {"/": "`+p1.Cid().String()+`"}}`), math.MaxUint64, -1) - if err != nil { - t.Fatal(err) - } - - if err := api.Dag().AddMany(ctx, []ipld.Node{nd2, nd3}); err != nil { - t.Fatal(err) - } - - err = api.Pin().Add(ctx, path.IpldPath(nd2.Cid())) - if err != nil { - t.Fatal(err) - } - - err = api.Pin().Add(ctx, path.IpldPath(nd3.Cid()), opt.Pin.Recursive(false)) - if err != nil { - t.Fatal(err) - } - - list, err := accPins(api.Pin().Ls(ctx)) - if err != nil { - t.Fatal(err) - } - - if len(list) != 3 { - t.Errorf("unexpected pin list len: %d", len(list)) - } - - list, err = accPins(api.Pin().Ls(ctx, opt.Pin.Ls.Direct())) - if err != nil { - t.Fatal(err) - } - - if len(list) != 1 { - t.Errorf("unexpected pin list len: %d", len(list)) - } - - if list[0].Path().String() != path.IpldPath(nd3.Cid()).String() { - t.Errorf("unexpected path, %s != %s", list[0].Path().String(), path.IpfsPath(nd3.Cid()).String()) - } - - list, err = accPins(api.Pin().Ls(ctx, opt.Pin.Ls.Recursive())) - if err != nil { - t.Fatal(err) - } - - if len(list) != 1 { - t.Errorf("unexpected pin list len: %d", len(list)) - } - - if list[0].Path().String() != path.IpldPath(nd2.Cid()).String() { - t.Errorf("unexpected path, %s != %s", list[0].Path().String(), path.IpldPath(nd2.Cid()).String()) - } - - list, err = accPins(api.Pin().Ls(ctx, opt.Pin.Ls.Indirect())) - if err != nil { - t.Fatal(err) - } - - if len(list) != 1 { - t.Errorf("unexpected pin list len: %d", len(list)) - } - - if list[0].Path().Cid().String() != p0.Cid().String() { - t.Errorf("unexpected path, %s != %s", list[0].Path().Cid().String(), p0.Cid().String()) - } - - res, err := api.Pin().Verify(ctx) - if err != nil { - t.Fatal(err) - } - n := 0 - for r := range res { - if err := r.Err(); err != nil { - t.Error(err) - } - if !r.Ok() { - t.Error("expected pin to be ok") - } - n++ - } - - if n != 1 { - t.Errorf("unexpected verify result count: %d", n) - } - - // TODO: figure out a way to test verify without touching IpfsNode - /* - err = api.Block().Rm(ctx, p0, opt.Block.Force(true)) - if err != nil { - t.Fatal(err) - } - - res, err = api.Pin().Verify(ctx) - if err != nil { - t.Fatal(err) - } - n = 0 - for r := range res { - if r.Ok() { - t.Error("expected pin to not be ok") - } - - if len(r.BadNodes()) != 1 { - t.Fatalf("unexpected badNodes len") - } - - if r.BadNodes()[0].Path().Cid().String() != p0.Cid().String() { - t.Error("unexpected badNode path") - } - - if r.BadNodes()[0].Err().Error() != "merkledag: not found" { - t.Errorf("unexpected badNode error: %s", r.BadNodes()[0].Err().Error()) - } - n++ - } - - if n != 1 { - t.Errorf("unexpected verify result count: %d", n) - } - */ -} - -// TestPinLsIndirect verifies that indirect nodes are listed by pin ls even if a parent node is directly pinned -func (tp *TestSuite) TestPinLsIndirect(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - leaf, parent, grandparent := getThreeChainedNodes(t, ctx, api, "foo") - - err = api.Pin().Add(ctx, path.IpldPath(grandparent.Cid())) - if err != nil { - t.Fatal(err) - } - - err = api.Pin().Add(ctx, path.IpldPath(parent.Cid()), opt.Pin.Recursive(false)) - if err != nil { - t.Fatal(err) - } - - assertPinTypes(t, ctx, api, []cidContainer{grandparent}, []cidContainer{parent}, []cidContainer{leaf}) -} - -// TestPinLsPrecedence verifies the precedence of pins (recursive > direct > indirect) -func (tp 
*TestSuite) TestPinLsPrecedence(t *testing.T) { - // Testing precedence of recursive, direct and indirect pins - // Results should be recursive > indirect, direct > indirect, and recursive > direct - - t.Run("TestPinLsPredenceRecursiveIndirect", tp.TestPinLsPredenceRecursiveIndirect) - t.Run("TestPinLsPrecedenceDirectIndirect", tp.TestPinLsPrecedenceDirectIndirect) - t.Run("TestPinLsPrecedenceRecursiveDirect", tp.TestPinLsPrecedenceRecursiveDirect) -} - -func (tp *TestSuite) TestPinLsPredenceRecursiveIndirect(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - // Test recursive > indirect - leaf, parent, grandparent := getThreeChainedNodes(t, ctx, api, "recursive > indirect") - - err = api.Pin().Add(ctx, path.IpldPath(grandparent.Cid())) - if err != nil { - t.Fatal(err) - } - - err = api.Pin().Add(ctx, path.IpldPath(parent.Cid())) - if err != nil { - t.Fatal(err) - } - - assertPinTypes(t, ctx, api, []cidContainer{grandparent, parent}, []cidContainer{}, []cidContainer{leaf}) -} - -func (tp *TestSuite) TestPinLsPrecedenceDirectIndirect(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - // Test direct > indirect - leaf, parent, grandparent := getThreeChainedNodes(t, ctx, api, "direct > indirect") - - err = api.Pin().Add(ctx, path.IpldPath(grandparent.Cid())) - if err != nil { - t.Fatal(err) - } - - err = api.Pin().Add(ctx, path.IpldPath(parent.Cid()), opt.Pin.Recursive(false)) - if err != nil { - t.Fatal(err) - } - - assertPinTypes(t, ctx, api, []cidContainer{grandparent}, []cidContainer{parent}, []cidContainer{leaf}) -} - -func (tp *TestSuite) TestPinLsPrecedenceRecursiveDirect(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - // Test recursive > direct - leaf, parent, grandparent := getThreeChainedNodes(t, ctx, api, "recursive + direct = error") - - err = api.Pin().Add(ctx, path.IpldPath(parent.Cid())) - if err != nil { - t.Fatal(err) - } - - err = api.Pin().Add(ctx, path.IpldPath(parent.Cid()), opt.Pin.Recursive(false)) - if err == nil { - t.Fatal("expected error directly pinning a recursively pinned node") - } - - assertPinTypes(t, ctx, api, []cidContainer{parent}, []cidContainer{}, []cidContainer{leaf}) - - err = api.Pin().Add(ctx, path.IpldPath(grandparent.Cid()), opt.Pin.Recursive(false)) - if err != nil { - t.Fatal(err) - } - - err = api.Pin().Add(ctx, path.IpldPath(grandparent.Cid())) - if err != nil { - t.Fatal(err) - } - - assertPinTypes(t, ctx, api, []cidContainer{grandparent, parent}, []cidContainer{}, []cidContainer{leaf}) -} - -func (tp *TestSuite) TestPinIsPinned(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - leaf, parent, grandparent := getThreeChainedNodes(t, ctx, api, "foofoo") - - assertNotPinned(t, ctx, api, path.IpldPath(grandparent.Cid())) - assertNotPinned(t, ctx, api, path.IpldPath(parent.Cid())) - assertNotPinned(t, ctx, api, path.IpldPath(leaf.Cid())) - - err = api.Pin().Add(ctx, path.IpldPath(parent.Cid()), opt.Pin.Recursive(true)) - if err != nil { - t.Fatal(err) - } - - assertNotPinned(t, ctx, api, path.IpldPath(grandparent.Cid())) - assertIsPinned(t, ctx, api, path.IpldPath(parent.Cid()), "recursive") - 
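// With the parent pinned recursively, the leaf it links to should now be
// reported as an "indirect" pin, while the grandparent checked above is
// still expected to be unpinned.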
assertIsPinned(t, ctx, api, path.IpldPath(leaf.Cid()), "indirect") - - err = api.Pin().Add(ctx, path.IpldPath(grandparent.Cid()), opt.Pin.Recursive(false)) - if err != nil { - t.Fatal(err) - } - - assertIsPinned(t, ctx, api, path.IpldPath(grandparent.Cid()), "direct") - assertIsPinned(t, ctx, api, path.IpldPath(parent.Cid()), "recursive") - assertIsPinned(t, ctx, api, path.IpldPath(leaf.Cid()), "indirect") -} - -type cidContainer interface { - Cid() cid.Cid -} - -func getThreeChainedNodes(t *testing.T, ctx context.Context, api iface.CoreAPI, leafData string) (cidContainer, cidContainer, cidContainer) { - leaf, err := api.Unixfs().Add(ctx, strFile(leafData)()) - if err != nil { - t.Fatal(err) - } - - parent, err := ipldcbor.FromJSON(strings.NewReader(`{"lnk": {"/": "`+leaf.Cid().String()+`"}}`), math.MaxUint64, -1) - if err != nil { - t.Fatal(err) - } - - grandparent, err := ipldcbor.FromJSON(strings.NewReader(`{"lnk": {"/": "`+parent.Cid().String()+`"}}`), math.MaxUint64, -1) - if err != nil { - t.Fatal(err) - } - - if err := api.Dag().AddMany(ctx, []ipld.Node{parent, grandparent}); err != nil { - t.Fatal(err) - } - - return leaf, parent, grandparent -} - -func assertPinTypes(t *testing.T, ctx context.Context, api iface.CoreAPI, recusive, direct, indirect []cidContainer) { - assertPinLsAllConsistency(t, ctx, api) - - list, err := accPins(api.Pin().Ls(ctx, opt.Pin.Ls.Recursive())) - if err != nil { - t.Fatal(err) - } - - assertPinCids(t, list, recusive...) - - list, err = accPins(api.Pin().Ls(ctx, opt.Pin.Ls.Direct())) - if err != nil { - t.Fatal(err) - } - - assertPinCids(t, list, direct...) - - list, err = accPins(api.Pin().Ls(ctx, opt.Pin.Ls.Indirect())) - if err != nil { - t.Fatal(err) - } - - assertPinCids(t, list, indirect...) -} - -// assertPinCids verifies that the pins match the expected cids -func assertPinCids(t *testing.T, pins []iface.Pin, cids ...cidContainer) { - t.Helper() - - if expected, actual := len(cids), len(pins); expected != actual { - t.Fatalf("expected pin list to have len %d, was %d", expected, actual) - } - - cSet := cid.NewSet() - for _, c := range cids { - cSet.Add(c.Cid()) - } - - valid := true - for _, p := range pins { - c := p.Path().Cid() - if cSet.Has(c) { - cSet.Remove(c) - } else { - valid = false - break - } - } - - valid = valid && cSet.Len() == 0 - - if !valid { - pinStrs := make([]string, len(pins)) - for i, p := range pins { - pinStrs[i] = p.Path().Cid().String() - } - pathStrs := make([]string, len(cids)) - for i, c := range cids { - pathStrs[i] = c.Cid().String() - } - t.Fatalf("expected: %s \nactual: %s", strings.Join(pathStrs, ", "), strings.Join(pinStrs, ", ")) - } -} - -// assertPinLsAllConsistency verifies that listing all pins gives the same result as listing the pin types individually -func assertPinLsAllConsistency(t *testing.T, ctx context.Context, api iface.CoreAPI) { - t.Helper() - allPins, err := accPins(api.Pin().Ls(ctx)) - if err != nil { - t.Fatal(err) - } - - type pinTypeProps struct { - *cid.Set - opt.PinLsOption - } - - all, recursive, direct, indirect := cid.NewSet(), cid.NewSet(), cid.NewSet(), cid.NewSet() - typeMap := map[string]*pinTypeProps{ - "recursive": {recursive, opt.Pin.Ls.Recursive()}, - "direct": {direct, opt.Pin.Ls.Direct()}, - "indirect": {indirect, opt.Pin.Ls.Indirect()}, - } - - for _, p := range allPins { - if !all.Visit(p.Path().Cid()) { - t.Fatalf("pin ls returned the same cid multiple times") - } - - typeStr := p.Type() - if typeSet, ok := typeMap[p.Type()]; ok { - typeSet.Add(p.Path().Cid()) - } else { - 
t.Fatalf("unknown pin type: %s", typeStr) - } - } - - for typeStr, pinProps := range typeMap { - pins, err := accPins(api.Pin().Ls(ctx, pinProps.PinLsOption)) - if err != nil { - t.Fatal(err) - } - - if expected, actual := len(pins), pinProps.Set.Len(); expected != actual { - t.Fatalf("pin ls all has %d pins of type %s, but pin ls for the type has %d", expected, typeStr, actual) - } - - for _, p := range pins { - if pinType := p.Type(); pinType != typeStr { - t.Fatalf("returned wrong pin type: expected %s, got %s", typeStr, pinType) - } - - if c := p.Path().Cid(); !pinProps.Has(c) { - t.Fatalf("%s expected to be in pin ls all as type %s", c.String(), typeStr) - } - } - } -} - -func assertIsPinned(t *testing.T, ctx context.Context, api iface.CoreAPI, p path.Path, typeStr string) { - t.Helper() - withType, err := opt.Pin.IsPinned.Type(typeStr) - if err != nil { - t.Fatal("unhandled pin type") - } - - whyPinned, pinned, err := api.Pin().IsPinned(ctx, p, withType) - if err != nil { - t.Fatal(err) - } - - if !pinned { - t.Fatalf("%s expected to be pinned with type %s", p, typeStr) - } - - switch typeStr { - case "recursive", "direct": - if typeStr != whyPinned { - t.Fatalf("reason for pinning expected to be %s for %s, got %s", typeStr, p, whyPinned) - } - case "indirect": - if whyPinned == "" { - t.Fatalf("expected to have a pin reason for %s", p) - } - } -} - -func assertNotPinned(t *testing.T, ctx context.Context, api iface.CoreAPI, p path.Path) { - t.Helper() - - _, pinned, err := api.Pin().IsPinned(ctx, p) - if err != nil { - t.Fatal(err) - } - - if pinned { - t.Fatalf("%s expected to not be pinned", p) - } -} - -func accPins(pins <-chan iface.Pin, err error) ([]iface.Pin, error) { - if err != nil { - return nil, err - } - - var result []iface.Pin - - for pin := range pins { - if pin.Err() != nil { - return nil, pin.Err() - } - result = append(result, pin) - } - - return result, nil -} diff --git a/coreiface/tests/pubsub.go b/coreiface/tests/pubsub.go deleted file mode 100644 index 8cbc6a3eb..000000000 --- a/coreiface/tests/pubsub.go +++ /dev/null @@ -1,136 +0,0 @@ -package tests - -import ( - "context" - "testing" - "time" - - iface "github.com/ipfs/boxo/coreiface" - "github.com/ipfs/boxo/coreiface/options" -) - -func (tp *TestSuite) TestPubSub(t *testing.T) { - tp.hasApi(t, func(api iface.CoreAPI) error { - if api.PubSub() == nil { - return errAPINotImplemented - } - return nil - }) - - t.Run("TestBasicPubSub", tp.TestBasicPubSub) -} - -func (tp *TestSuite) TestBasicPubSub(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - apis, err := tp.MakeAPISwarm(t, ctx, 2) - if err != nil { - t.Fatal(err) - } - - sub, err := apis[0].PubSub().Subscribe(ctx, "testch") - if err != nil { - t.Fatal(err) - } - - done := make(chan struct{}) - go func() { - defer close(done) - - ticker := time.NewTicker(100 * time.Millisecond) - defer ticker.Stop() - - for { - err := apis[1].PubSub().Publish(ctx, "testch", []byte("hello world")) - switch err { - case nil: - case context.Canceled: - return - default: - t.Error(err) - cancel() - return - } - select { - case <-ticker.C: - case <-ctx.Done(): - return - } - } - }() - - // Wait for the sender to finish before we return. - // Otherwise, we can get random errors as publish fails. 
- defer func() { - cancel() - <-done - }() - - m, err := sub.Next(ctx) - if err != nil { - t.Fatal(err) - } - - if string(m.Data()) != "hello world" { - t.Errorf("got invalid data: %s", string(m.Data())) - } - - self1, err := apis[1].Key().Self(ctx) - if err != nil { - t.Fatal(err) - } - - if m.From() != self1.ID() { - t.Errorf("m.From didn't match") - } - - peers, err := apis[1].PubSub().Peers(ctx, options.PubSub.Topic("testch")) - if err != nil { - t.Fatal(err) - } - - if len(peers) != 1 { - t.Fatalf("got incorrect number of peers: %d", len(peers)) - } - - self0, err := apis[0].Key().Self(ctx) - if err != nil { - t.Fatal(err) - } - - if peers[0] != self0.ID() { - t.Errorf("peer didn't match") - } - - peers, err = apis[1].PubSub().Peers(ctx, options.PubSub.Topic("nottestch")) - if err != nil { - t.Fatal(err) - } - - if len(peers) != 0 { - t.Fatalf("got incorrect number of peers: %d", len(peers)) - } - - topics, err := apis[0].PubSub().Ls(ctx) - if err != nil { - t.Fatal(err) - } - - if len(topics) != 1 { - t.Fatalf("got incorrect number of topics: %d", len(peers)) - } - - if topics[0] != "testch" { - t.Errorf("topic didn't match") - } - - topics, err = apis[1].PubSub().Ls(ctx) - if err != nil { - t.Fatal(err) - } - - if len(topics) != 0 { - t.Fatalf("got incorrect number of topics: %d", len(peers)) - } -} diff --git a/coreiface/tests/routing.go b/coreiface/tests/routing.go deleted file mode 100644 index fd10dffcd..000000000 --- a/coreiface/tests/routing.go +++ /dev/null @@ -1,100 +0,0 @@ -package tests - -import ( - "context" - "testing" - "time" - - iface "github.com/ipfs/boxo/coreiface" - "github.com/ipfs/boxo/coreiface/options" - "github.com/ipfs/boxo/coreiface/path" - "github.com/ipfs/boxo/ipns" - "github.com/stretchr/testify/require" -) - -func (tp *TestSuite) TestRouting(t *testing.T) { - tp.hasApi(t, func(api iface.CoreAPI) error { - if api.Routing() == nil { - return errAPINotImplemented - } - return nil - }) - - t.Run("TestRoutingGet", tp.TestRoutingGet) - t.Run("TestRoutingPut", tp.TestRoutingPut) - t.Run("TestRoutingPutOffline", tp.TestRoutingPutOffline) -} - -func (tp *TestSuite) testRoutingPublishKey(t *testing.T, ctx context.Context, api iface.CoreAPI, opts ...options.NamePublishOption) (path.Path, ipns.Name) { - p, err := addTestObject(ctx, api) - require.NoError(t, err) - - name, err := api.Name().Publish(ctx, p, opts...) - require.NoError(t, err) - - time.Sleep(3 * time.Second) - return p, name -} - -func (tp *TestSuite) TestRoutingGet(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - apis, err := tp.MakeAPISwarm(t, ctx, 2) - require.NoError(t, err) - - // Node 1: publishes an IPNS name - p, name := tp.testRoutingPublishKey(t, ctx, apis[0]) - - // Node 2: retrieves the best value for the IPNS name. - data, err := apis[1].Routing().Get(ctx, ipns.NamespacePrefix+name.String()) - require.NoError(t, err) - - rec, err := ipns.UnmarshalRecord(data) - require.NoError(t, err) - - val, err := rec.Value() - require.NoError(t, err) - require.Equal(t, p.String(), val.String()) -} - -func (tp *TestSuite) TestRoutingPut(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - apis, err := tp.MakeAPISwarm(t, ctx, 2) - require.NoError(t, err) - - // Create and publish IPNS entry. - _, name := tp.testRoutingPublishKey(t, ctx, apis[0]) - - // Get valid routing value. - data, err := apis[0].Routing().Get(ctx, ipns.NamespacePrefix+name.String()) - require.NoError(t, err) - - // Put routing value. 
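// IPNS records are self-certifying (signed by the key they name), so a node
// that did not author the record can still put it; the routing layer only
// needs the record itself to validate.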
- err = apis[1].Routing().Put(ctx, ipns.NamespacePrefix+name.String(), data) - require.NoError(t, err) -} - -func (tp *TestSuite) TestRoutingPutOffline(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // init a swarm & publish an IPNS entry to get a valid payload - apis, err := tp.MakeAPISwarm(t, ctx, 2) - require.NoError(t, err) - - _, name := tp.testRoutingPublishKey(t, ctx, apis[0], options.Name.AllowOffline(true)) - data, err := apis[0].Routing().Get(ctx, ipns.NamespacePrefix+name.String()) - require.NoError(t, err) - - // init our offline node and try to put the payload - api, err := tp.makeAPIWithIdentityAndOffline(t, ctx) - require.NoError(t, err) - - err = api.Routing().Put(ctx, ipns.NamespacePrefix+name.String(), data) - require.Error(t, err, "this operation should fail because we are offline") - - err = api.Routing().Put(ctx, ipns.NamespacePrefix+name.String(), data, options.Put.AllowOffline(true)) - require.NoError(t, err) -} diff --git a/coreiface/tests/unixfs.go b/coreiface/tests/unixfs.go deleted file mode 100644 index 25c3ac1b7..000000000 --- a/coreiface/tests/unixfs.go +++ /dev/null @@ -1,1082 +0,0 @@ -package tests - -import ( - "bytes" - "context" - "encoding/hex" - "fmt" - "io" - "math" - "math/rand" - "os" - "strconv" - "strings" - "sync" - "testing" - - "github.com/ipfs/boxo/coreiface/path" - - coreiface "github.com/ipfs/boxo/coreiface" - "github.com/ipfs/boxo/coreiface/options" - - "github.com/ipfs/boxo/files" - mdag "github.com/ipfs/boxo/ipld/merkledag" - "github.com/ipfs/boxo/ipld/unixfs" - "github.com/ipfs/boxo/ipld/unixfs/importer/helpers" - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - ipld "github.com/ipfs/go-ipld-format" - mh "github.com/multiformats/go-multihash" -) - -func (tp *TestSuite) TestUnixfs(t *testing.T) { - tp.hasApi(t, func(api coreiface.CoreAPI) error { - if api.Unixfs() == nil { - return errAPINotImplemented - } - return nil - }) - - t.Run("TestAdd", tp.TestAdd) - t.Run("TestAddPinned", tp.TestAddPinned) - t.Run("TestAddHashOnly", tp.TestAddHashOnly) - t.Run("TestGetEmptyFile", tp.TestGetEmptyFile) - t.Run("TestGetDir", tp.TestGetDir) - t.Run("TestGetNonUnixfs", tp.TestGetNonUnixfs) - t.Run("TestLs", tp.TestLs) - t.Run("TestEntriesExpired", tp.TestEntriesExpired) - t.Run("TestLsEmptyDir", tp.TestLsEmptyDir) - t.Run("TestLsNonUnixfs", tp.TestLsNonUnixfs) - t.Run("TestAddCloses", tp.TestAddCloses) - t.Run("TestGetSeek", tp.TestGetSeek) - t.Run("TestGetReadAt", tp.TestGetReadAt) -} - -// `echo -n 'hello, world!' | ipfs add` -var ( - hello = "/ipfs/QmQy2Dw4Wk7rdJKjThjYXzfFJNaRKRHhHP5gHHXroJMYxk" - helloStr = "hello, world!" 
-) - -// `echo -n | ipfs add` -var emptyFile = "/ipfs/QmbFMke1KXqnYyBBWxB74N4c5SBnJMVAiMNRcGu6x1AwQH" - -func strFile(data string) func() files.Node { - return func() files.Node { - return files.NewBytesFile([]byte(data)) - } -} - -func twoLevelDir() func() files.Node { - return func() files.Node { - return files.NewMapDirectory(map[string]files.Node{ - "abc": files.NewMapDirectory(map[string]files.Node{ - "def": files.NewBytesFile([]byte("world")), - }), - - "bar": files.NewBytesFile([]byte("hello2")), - "foo": files.NewBytesFile([]byte("hello1")), - }) - } -} - -func flatDir() files.Node { - return files.NewMapDirectory(map[string]files.Node{ - "bar": files.NewBytesFile([]byte("hello2")), - "foo": files.NewBytesFile([]byte("hello1")), - }) -} - -func wrapped(names ...string) func(f files.Node) files.Node { - return func(f files.Node) files.Node { - for i := range names { - f = files.NewMapDirectory(map[string]files.Node{ - names[len(names)-i-1]: f, - }) - } - return f - } -} - -func (tp *TestSuite) TestAdd(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - p := func(h string) path.Resolved { - c, err := cid.Parse(h) - if err != nil { - t.Fatal(err) - } - return path.IpfsPath(c) - } - - rf, err := os.CreateTemp(os.TempDir(), "unixfs-add-real") - if err != nil { - t.Fatal(err) - } - rfp := rf.Name() - - if _, err := rf.Write([]byte(helloStr)); err != nil { - t.Fatal(err) - } - - stat, err := rf.Stat() - if err != nil { - t.Fatal(err) - } - - if err := rf.Close(); err != nil { - t.Fatal(err) - } - defer os.Remove(rfp) - - realFile := func() files.Node { - n, err := files.NewReaderPathFile(rfp, io.NopCloser(strings.NewReader(helloStr)), stat) - if err != nil { - t.Fatal(err) - } - return n - } - - cases := []struct { - name string - data func() files.Node - expect func(files.Node) files.Node - - apiOpts []options.ApiOption - - path string - err string - - wrap string - - events []coreiface.AddEvent - - opts []options.UnixfsAddOption - }{ - // Simple cases - { - name: "simpleAdd", - data: strFile(helloStr), - path: hello, - opts: []options.UnixfsAddOption{}, - }, - { - name: "addEmpty", - data: strFile(""), - path: emptyFile, - }, - // CIDv1 version / rawLeaves - { - name: "addCidV1", - data: strFile(helloStr), - path: "/ipfs/bafkreidi4zlleupgp2bvrpxyja5lbvi4mym7hz5bvhyoowby2qp7g2hxfa", - opts: []options.UnixfsAddOption{options.Unixfs.CidVersion(1)}, - }, - { - name: "addCidV1NoLeaves", - data: strFile(helloStr), - path: "/ipfs/bafybeibhbcn7k7o2m6xsqkrlfiokod3nxwe47viteynhruh6uqx7hvkjfu", - opts: []options.UnixfsAddOption{options.Unixfs.CidVersion(1), options.Unixfs.RawLeaves(false)}, - }, - // Non sha256 hash vs CID - { - name: "addCidSha3", - data: strFile(helloStr), - path: "/ipfs/bafkrmichjflejeh6aren53o7pig7zk3m3vxqcoc2i5dv326k3x6obh7jry", - opts: []options.UnixfsAddOption{options.Unixfs.Hash(mh.SHA3_256)}, - }, - { - name: "addCidSha3Cid0", - data: strFile(helloStr), - err: "CIDv0 only supports sha2-256", - opts: []options.UnixfsAddOption{options.Unixfs.CidVersion(0), options.Unixfs.Hash(mh.SHA3_256)}, - }, - // Inline - { - name: "addInline", - data: strFile(helloStr), - path: "/ipfs/bafyaafikcmeaeeqnnbswy3dpfqqho33snrsccgan", - opts: []options.UnixfsAddOption{options.Unixfs.Inline(true)}, - }, - { - name: "addInlineLimit", - data: strFile(helloStr), - path: "/ipfs/bafyaafikcmeaeeqnnbswy3dpfqqho33snrsccgan", - opts: []options.UnixfsAddOption{options.Unixfs.InlineLimit(32), 
options.Unixfs.Inline(true)}, - }, - { - name: "addInlineZero", - data: strFile(""), - path: "/ipfs/bafkqaaa", - opts: []options.UnixfsAddOption{options.Unixfs.InlineLimit(0), options.Unixfs.Inline(true), options.Unixfs.RawLeaves(true)}, - }, - { // TODO: after coreapi add is used in `ipfs add`, consider making this default for inline - name: "addInlineRaw", - data: strFile(helloStr), - path: "/ipfs/bafkqadlimvwgy3zmeb3w64tmmqqq", - opts: []options.UnixfsAddOption{options.Unixfs.InlineLimit(32), options.Unixfs.Inline(true), options.Unixfs.RawLeaves(true)}, - }, - // Chunker / Layout - { - name: "addChunks", - data: strFile(strings.Repeat("aoeuidhtns", 200)), - path: "/ipfs/QmRo11d4QJrST47aaiGVJYwPhoNA4ihRpJ5WaxBWjWDwbX", - opts: []options.UnixfsAddOption{options.Unixfs.Chunker("size-4")}, - }, - { - name: "addChunksTrickle", - data: strFile(strings.Repeat("aoeuidhtns", 200)), - path: "/ipfs/QmNNhDGttafX3M1wKWixGre6PrLFGjnoPEDXjBYpTv93HP", - opts: []options.UnixfsAddOption{options.Unixfs.Chunker("size-4"), options.Unixfs.Layout(options.TrickleLayout)}, - }, - // Local - { - name: "addLocal", // better cases in sharness - data: strFile(helloStr), - path: hello, - apiOpts: []options.ApiOption{options.Api.Offline(true)}, - }, - { - name: "hashOnly", // test (non)fetchability - data: strFile(helloStr), - path: hello, - opts: []options.UnixfsAddOption{options.Unixfs.HashOnly(true)}, - }, - // multi file - { - name: "simpleDirNoWrap", - data: flatDir, - path: "/ipfs/QmRKGpFfR32FVXdvJiHfo4WJ5TDYBsM1P9raAp1p6APWSp", - }, - { - name: "simpleDir", - data: flatDir, - wrap: "t", - expect: wrapped("t"), - path: "/ipfs/Qmc3nGXm1HtUVCmnXLQHvWcNwfdZGpfg2SRm1CxLf7Q2Rm", - }, - { - name: "twoLevelDir", - data: twoLevelDir(), - wrap: "t", - expect: wrapped("t"), - path: "/ipfs/QmPwsL3T5sWhDmmAWZHAzyjKtMVDS9a11aHNRqb3xoVnmg", - }, - // wrapped - { - name: "addWrapped", - path: "/ipfs/QmVE9rNpj5doj7XHzp5zMUxD7BJgXEqx4pe3xZ3JBReWHE", - data: func() files.Node { - return files.NewBytesFile([]byte(helloStr)) - }, - wrap: "foo", - expect: wrapped("foo"), - }, - // hidden - { - name: "hiddenFilesAdded", - data: func() files.Node { - return files.NewMapDirectory(map[string]files.Node{ - ".bar": files.NewBytesFile([]byte("hello2")), - "bar": files.NewBytesFile([]byte("hello2")), - "foo": files.NewBytesFile([]byte("hello1")), - }) - }, - wrap: "t", - expect: wrapped("t"), - path: "/ipfs/QmPXLSBX382vJDLrGakcbrZDkU3grfkjMox7EgSC9KFbtQ", - }, - // NoCopy - { - name: "simpleNoCopy", - data: realFile, - path: "/ipfs/bafkreidi4zlleupgp2bvrpxyja5lbvi4mym7hz5bvhyoowby2qp7g2hxfa", - opts: []options.UnixfsAddOption{options.Unixfs.Nocopy(true)}, - }, - { - name: "noCopyNoRaw", - data: realFile, - path: "/ipfs/bafkreidi4zlleupgp2bvrpxyja5lbvi4mym7hz5bvhyoowby2qp7g2hxfa", - opts: []options.UnixfsAddOption{options.Unixfs.Nocopy(true), options.Unixfs.RawLeaves(false)}, - err: "nocopy option requires '--raw-leaves' to be enabled as well", - }, - { - name: "noCopyNoPath", - data: strFile(helloStr), - path: "/ipfs/bafkreidi4zlleupgp2bvrpxyja5lbvi4mym7hz5bvhyoowby2qp7g2hxfa", - opts: []options.UnixfsAddOption{options.Unixfs.Nocopy(true)}, - err: helpers.ErrMissingFsRef.Error(), - }, - // Events / Progress - { - name: "simpleAddEvent", - data: strFile(helloStr), - path: "/ipfs/bafkreidi4zlleupgp2bvrpxyja5lbvi4mym7hz5bvhyoowby2qp7g2hxfa", - events: []coreiface.AddEvent{ - {Name: "bafkreidi4zlleupgp2bvrpxyja5lbvi4mym7hz5bvhyoowby2qp7g2hxfa", Path: p("bafkreidi4zlleupgp2bvrpxyja5lbvi4mym7hz5bvhyoowby2qp7g2hxfa"), Size: 
strconv.Itoa(len(helloStr))}, - }, - opts: []options.UnixfsAddOption{options.Unixfs.RawLeaves(true)}, - }, - { - name: "silentAddEvent", - data: twoLevelDir(), - path: "/ipfs/QmVG2ZYCkV1S4TK8URA3a4RupBF17A8yAr4FqsRDXVJASr", - events: []coreiface.AddEvent{ - {Name: "abc", Path: p("QmU7nuGs2djqK99UNsNgEPGh6GV4662p6WtsgccBNGTDxt"), Size: "62"}, - {Name: "", Path: p("QmVG2ZYCkV1S4TK8URA3a4RupBF17A8yAr4FqsRDXVJASr"), Size: "229"}, - }, - opts: []options.UnixfsAddOption{options.Unixfs.Silent(true)}, - }, - { - name: "dirAddEvents", - data: twoLevelDir(), - path: "/ipfs/QmVG2ZYCkV1S4TK8URA3a4RupBF17A8yAr4FqsRDXVJASr", - events: []coreiface.AddEvent{ - {Name: "abc/def", Path: p("QmNyJpQkU1cEkBwMDhDNFstr42q55mqG5GE5Mgwug4xyGk"), Size: "13"}, - {Name: "bar", Path: p("QmS21GuXiRMvJKHos4ZkEmQDmRBqRaF5tQS2CQCu2ne9sY"), Size: "14"}, - {Name: "foo", Path: p("QmfAjGiVpTN56TXi6SBQtstit5BEw3sijKj1Qkxn6EXKzJ"), Size: "14"}, - {Name: "abc", Path: p("QmU7nuGs2djqK99UNsNgEPGh6GV4662p6WtsgccBNGTDxt"), Size: "62"}, - {Name: "", Path: p("QmVG2ZYCkV1S4TK8URA3a4RupBF17A8yAr4FqsRDXVJASr"), Size: "229"}, - }, - }, - { - name: "progress1M", - data: func() files.Node { - return files.NewReaderFile(bytes.NewReader(bytes.Repeat([]byte{0}, 1000000))) - }, - path: "/ipfs/QmXXNNbwe4zzpdMg62ZXvnX1oU7MwSrQ3vAEtuwFKCm1oD", - events: []coreiface.AddEvent{ - {Name: "", Bytes: 262144}, - {Name: "", Bytes: 524288}, - {Name: "", Bytes: 786432}, - {Name: "", Bytes: 1000000}, - {Name: "QmXXNNbwe4zzpdMg62ZXvnX1oU7MwSrQ3vAEtuwFKCm1oD", Path: p("QmXXNNbwe4zzpdMg62ZXvnX1oU7MwSrQ3vAEtuwFKCm1oD"), Size: "1000256"}, - }, - wrap: "", - opts: []options.UnixfsAddOption{options.Unixfs.Progress(true)}, - }, - } - - for _, testCase := range cases { - t.Run(testCase.name, func(t *testing.T) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - // recursive logic - - data := testCase.data() - if testCase.wrap != "" { - data = files.NewMapDirectory(map[string]files.Node{ - testCase.wrap: data, - }) - } - - // handle events if relevant to test case - - opts := testCase.opts - eventOut := make(chan interface{}) - var evtWg sync.WaitGroup - if len(testCase.events) > 0 { - opts = append(opts, options.Unixfs.Events(eventOut)) - evtWg.Add(1) - - go func() { - defer evtWg.Done() - expected := testCase.events - - for evt := range eventOut { - event, ok := evt.(*coreiface.AddEvent) - if !ok { - t.Error("unexpected event type") - continue - } - - if len(expected) < 1 { - t.Error("got more events than expected") - continue - } - - if expected[0].Size != event.Size { - t.Errorf("Event.Size didn't match, %s != %s", expected[0].Size, event.Size) - } - - if expected[0].Name != event.Name { - t.Errorf("Event.Name didn't match, %s != %s", expected[0].Name, event.Name) - } - - if expected[0].Path != nil && event.Path != nil { - if expected[0].Path.Cid().String() != event.Path.Cid().String() { - t.Errorf("Event.Hash didn't match, %s != %s", expected[0].Path, event.Path) - } - } else if event.Path != expected[0].Path { - t.Errorf("Event.Hash didn't match, %s != %s", expected[0].Path, event.Path) - } - if expected[0].Bytes != event.Bytes { - t.Errorf("Event.Bytes didn't match, %d != %d", expected[0].Bytes, event.Bytes) - } - - expected = expected[1:] - } - - if len(expected) > 0 { - t.Errorf("%d event(s) didn't arrive", len(expected)) - } - }() - } - - tapi, err := api.WithOptions(testCase.apiOpts...) - if err != nil { - t.Fatal(err) - } - - // Add! - - p, err := tapi.Unixfs().Add(ctx, data, opts...) 
- close(eventOut) - evtWg.Wait() - if testCase.err != "" { - if err == nil { - t.Fatalf("expected an error: %s", testCase.err) - } - if err.Error() != testCase.err { - t.Fatalf("expected an error: '%s' != '%s'", err.Error(), testCase.err) - } - return - } - if err != nil { - t.Fatal(err) - } - - if p.String() != testCase.path { - t.Errorf("expected path %s, got: %s", testCase.path, p) - } - - // compare file structure with Unixfs().Get - - var cmpFile func(origName string, orig files.Node, gotName string, got files.Node) - cmpFile = func(origName string, orig files.Node, gotName string, got files.Node) { - _, origDir := orig.(files.Directory) - _, gotDir := got.(files.Directory) - - if origName != gotName { - t.Errorf("file name mismatch, orig='%s', got='%s'", origName, gotName) - } - - if origDir != gotDir { - t.Fatalf("file type mismatch on %s", origName) - } - - if !gotDir { - defer orig.Close() - defer got.Close() - - do, err := io.ReadAll(orig.(files.File)) - if err != nil { - t.Fatal(err) - } - - dg, err := io.ReadAll(got.(files.File)) - if err != nil { - t.Fatal(err) - } - - if !bytes.Equal(do, dg) { - t.Fatal("data not equal") - } - - return - } - - origIt := orig.(files.Directory).Entries() - gotIt := got.(files.Directory).Entries() - - for { - if origIt.Next() { - if !gotIt.Next() { - t.Fatal("gotIt out of entries before origIt") - } - } else { - if gotIt.Next() { - t.Fatal("origIt out of entries before gotIt") - } - break - } - - cmpFile(origIt.Name(), origIt.Node(), gotIt.Name(), gotIt.Node()) - } - if origIt.Err() != nil { - t.Fatal(origIt.Err()) - } - if gotIt.Err() != nil { - t.Fatal(gotIt.Err()) - } - } - - f, err := tapi.Unixfs().Get(ctx, p) - if err != nil { - t.Fatal(err) - } - - orig := testCase.data() - if testCase.expect != nil { - orig = testCase.expect(orig) - } - - cmpFile("", orig, "", f) - }) - } -} - -func (tp *TestSuite) TestAddPinned(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - _, err = api.Unixfs().Add(ctx, strFile(helloStr)(), options.Unixfs.Pin(true)) - if err != nil { - t.Fatal(err) - } - - pins, err := accPins(api.Pin().Ls(ctx)) - if err != nil { - t.Fatal(err) - } - if len(pins) != 1 { - t.Fatalf("expected 1 pin, got %d", len(pins)) - } - - if pins[0].Path().String() != "/ipld/QmQy2Dw4Wk7rdJKjThjYXzfFJNaRKRHhHP5gHHXroJMYxk" { - t.Fatalf("got unexpected pin: %s", pins[0].Path().String()) - } -} - -func (tp *TestSuite) TestAddHashOnly(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - p, err := api.Unixfs().Add(ctx, strFile(helloStr)(), options.Unixfs.HashOnly(true)) - if err != nil { - t.Fatal(err) - } - - if p.String() != hello { - t.Errorf("unxepected path: %s", p.String()) - } - - _, err = api.Block().Get(ctx, p) - if err == nil { - t.Fatal("expected an error") - } - if !ipld.IsNotFound(err) { - t.Errorf("unxepected error: %s", err.Error()) - } -} - -func (tp *TestSuite) TestGetEmptyFile(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - _, err = api.Unixfs().Add(ctx, files.NewBytesFile([]byte{})) - if err != nil { - t.Fatal(err) - } - - emptyFilePath := path.New(emptyFile) - - r, err := api.Unixfs().Get(ctx, emptyFilePath) - if err != nil { - t.Fatal(err) - } - - buf := make([]byte, 1) // non-zero so that 
Read() actually tries to read - n, err := io.ReadFull(r.(files.File), buf) - if err != nil && err != io.EOF { - t.Error(err) - } - if !bytes.HasPrefix(buf, []byte{0x00}) { - t.Fatalf("expected empty data, got [%s] [read=%d]", buf, n) - } -} - -func (tp *TestSuite) TestGetDir(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - edir := unixfs.EmptyDirNode() - err = api.Dag().Add(ctx, edir) - if err != nil { - t.Fatal(err) - } - p := path.IpfsPath(edir.Cid()) - - emptyDir, err := api.Object().New(ctx, options.Object.Type("unixfs-dir")) - if err != nil { - t.Fatal(err) - } - - if p.String() != path.IpfsPath(emptyDir.Cid()).String() { - t.Fatalf("expected path %s, got: %s", emptyDir.Cid(), p.String()) - } - - r, err := api.Unixfs().Get(ctx, path.IpfsPath(emptyDir.Cid())) - if err != nil { - t.Fatal(err) - } - - if _, ok := r.(files.Directory); !ok { - t.Fatalf("expected a directory") - } -} - -func (tp *TestSuite) TestGetNonUnixfs(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - nd := new(mdag.ProtoNode) - err = api.Dag().Add(ctx, nd) - if err != nil { - t.Fatal(err) - } - - _, err = api.Unixfs().Get(ctx, path.IpfsPath(nd.Cid())) - if !strings.Contains(err.Error(), "proto: required field") { - t.Fatalf("expected protobuf error, got: %s", err) - } -} - -func (tp *TestSuite) TestLs(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - r := strings.NewReader("content-of-file") - p, err := api.Unixfs().Add(ctx, files.NewMapDirectory(map[string]files.Node{ - "name-of-file": files.NewReaderFile(r), - "name-of-symlink": files.NewLinkFile("/foo/bar", nil), - })) - if err != nil { - t.Fatal(err) - } - - entries, err := api.Unixfs().Ls(ctx, p) - if err != nil { - t.Fatal(err) - } - - entry := <-entries - if entry.Err != nil { - t.Fatal(entry.Err) - } - if entry.Size != 15 { - t.Errorf("expected size = 15, got %d", entry.Size) - } - if entry.Name != "name-of-file" { - t.Errorf("expected name = name-of-file, got %s", entry.Name) - } - if entry.Type != coreiface.TFile { - t.Errorf("wrong type %s", entry.Type) - } - if entry.Cid.String() != "QmX3qQVKxDGz3URVC3861Z3CKtQKGBn6ffXRBBWGMFz9Lr" { - t.Errorf("expected cid = QmX3qQVKxDGz3URVC3861Z3CKtQKGBn6ffXRBBWGMFz9Lr, got %s", entry.Cid) - } - entry = <-entries - if entry.Err != nil { - t.Fatal(entry.Err) - } - if entry.Type != coreiface.TSymlink { - t.Errorf("wrong type %s", entry.Type) - } - if entry.Name != "name-of-symlink" { - t.Errorf("expected name = name-of-symlink, got %s", entry.Name) - } - if entry.Target != "/foo/bar" { - t.Errorf("expected symlink target to be /foo/bar, got %s", entry.Target) - } - - if l, ok := <-entries; ok { - t.Errorf("didn't expect a second link") - if l.Err != nil { - t.Error(l.Err) - } - } -} - -func (tp *TestSuite) TestEntriesExpired(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - r := strings.NewReader("content-of-file") - p, err := api.Unixfs().Add(ctx, files.NewMapDirectory(map[string]files.Node{ - "name-of-file": files.NewReaderFile(r), - })) - if err != nil { - t.Fatal(err) - } - - ctx, cancel = context.WithCancel(ctx) - - nd, err := api.Unixfs().Get(ctx, p) - if err != nil 
{ - t.Fatal(err) - } - cancel() - - it := files.ToDir(nd).Entries() - if it == nil { - t.Fatal("it was nil") - } - - if it.Next() { - t.Fatal("Next succeeded") - } - - if it.Err() != context.Canceled { - t.Fatalf("unexpected error %s", it.Err()) - } - - if it.Next() { - t.Fatal("Next succeeded") - } -} - -func (tp *TestSuite) TestLsEmptyDir(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - _, err = api.Unixfs().Add(ctx, files.NewSliceDirectory([]files.DirEntry{})) - if err != nil { - t.Fatal(err) - } - - emptyDir, err := api.Object().New(ctx, options.Object.Type("unixfs-dir")) - if err != nil { - t.Fatal(err) - } - - links, err := api.Unixfs().Ls(ctx, path.IpfsPath(emptyDir.Cid())) - if err != nil { - t.Fatal(err) - } - - if len(links) != 0 { - t.Fatalf("expected 0 links, got %d", len(links)) - } -} - -// TODO(lgierth) this should test properly, with len(links) > 0 -func (tp *TestSuite) TestLsNonUnixfs(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - nd, err := cbor.WrapObject(map[string]interface{}{"foo": "bar"}, math.MaxUint64, -1) - if err != nil { - t.Fatal(err) - } - - err = api.Dag().Add(ctx, nd) - if err != nil { - t.Fatal(err) - } - - links, err := api.Unixfs().Ls(ctx, path.IpfsPath(nd.Cid())) - if err != nil { - t.Fatal(err) - } - - if len(links) != 0 { - t.Fatalf("expected 0 links, got %d", len(links)) - } -} - -type closeTestF struct { - files.File - closed bool - - t *testing.T -} - -type closeTestD struct { - files.Directory - closed bool - - t *testing.T -} - -func (f *closeTestD) Close() error { - f.t.Helper() - if f.closed { - f.t.Fatal("already closed") - } - f.closed = true - return nil -} - -func (f *closeTestF) Close() error { - if f.closed { - f.t.Fatal("already closed") - } - f.closed = true - return nil -} - -func (tp *TestSuite) TestAddCloses(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - n4 := &closeTestF{files.NewBytesFile([]byte("foo")), false, t} - d3 := &closeTestD{files.NewMapDirectory(map[string]files.Node{ - "sub": n4, - }), false, t} - n2 := &closeTestF{files.NewBytesFile([]byte("bar")), false, t} - n1 := &closeTestF{files.NewBytesFile([]byte("baz")), false, t} - d0 := &closeTestD{files.NewMapDirectory(map[string]files.Node{ - "a": d3, - "b": n1, - "c": n2, - }), false, t} - - _, err = api.Unixfs().Add(ctx, d0) - if err != nil { - t.Fatal(err) - } - - for i, n := range []*closeTestF{n1, n2, n4} { - if !n.closed { - t.Errorf("file %d not closed!", i) - } - } - - for i, n := range []*closeTestD{d0, d3} { - if !n.closed { - t.Errorf("dir %d not closed!", i) - } - } -} - -func (tp *TestSuite) TestGetSeek(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - dataSize := int64(100000) - tf := files.NewReaderFile(io.LimitReader(rand.New(rand.NewSource(1403768328)), dataSize)) - - p, err := api.Unixfs().Add(ctx, tf, options.Unixfs.Chunker("size-100")) - if err != nil { - t.Fatal(err) - } - - r, err := api.Unixfs().Get(ctx, p) - if err != nil { - t.Fatal(err) - } - - f := files.ToFile(r) - if f == nil { - t.Fatal("not a file") - } - - orig := make([]byte, dataSize) - if _, err := io.ReadFull(f, orig); err != nil { 
- t.Fatal(err) - } - f.Close() - - origR := bytes.NewReader(orig) - - r, err = api.Unixfs().Get(ctx, p) - if err != nil { - t.Fatal(err) - } - - f = files.ToFile(r) - if f == nil { - t.Fatal("not a file") - } - - test := func(offset int64, whence int, read int, expect int64, shouldEof bool) { - t.Run(fmt.Sprintf("seek%d+%d-r%d-%d", whence, offset, read, expect), func(t *testing.T) { - n, err := f.Seek(offset, whence) - if err != nil { - t.Fatal(err) - } - origN, err := origR.Seek(offset, whence) - if err != nil { - t.Fatal(err) - } - - if n != origN { - t.Fatalf("offsets didn't match, expected %d, got %d", origN, n) - } - - buf := make([]byte, read) - origBuf := make([]byte, read) - origRead, err := origR.Read(origBuf) - if err != nil { - t.Fatalf("orig: %s", err) - } - r, err := io.ReadFull(f, buf) - switch { - case shouldEof && err != nil && err != io.ErrUnexpectedEOF: - fallthrough - case !shouldEof && err != nil: - t.Fatalf("f: %s", err) - case shouldEof: - _, err := f.Read([]byte{0}) - if err != io.EOF { - t.Fatal("expected EOF") - } - _, err = origR.Read([]byte{0}) - if err != io.EOF { - t.Fatal("expected EOF (orig)") - } - } - - if int64(r) != expect { - t.Fatal("read wrong amount of data") - } - if r != origRead { - t.Fatal("read different amount of data than bytes.Reader") - } - if !bytes.Equal(buf, origBuf) { - fmt.Fprintf(os.Stderr, "original:\n%s\n", hex.Dump(origBuf)) - fmt.Fprintf(os.Stderr, "got:\n%s\n", hex.Dump(buf)) - t.Fatal("data didn't match") - } - }) - } - - test(3, io.SeekCurrent, 10, 10, false) - test(3, io.SeekCurrent, 10, 10, false) - test(500, io.SeekCurrent, 10, 10, false) - test(350, io.SeekStart, 100, 100, false) - test(-123, io.SeekCurrent, 100, 100, false) - test(0, io.SeekStart, int(dataSize), dataSize, false) - test(dataSize-50, io.SeekStart, 100, 50, true) - test(-5, io.SeekEnd, 100, 5, true) -} - -func (tp *TestSuite) TestGetReadAt(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - api, err := tp.makeAPI(t, ctx) - if err != nil { - t.Fatal(err) - } - - dataSize := int64(100000) - tf := files.NewReaderFile(io.LimitReader(rand.New(rand.NewSource(1403768328)), dataSize)) - - p, err := api.Unixfs().Add(ctx, tf, options.Unixfs.Chunker("size-100")) - if err != nil { - t.Fatal(err) - } - - r, err := api.Unixfs().Get(ctx, p) - if err != nil { - t.Fatal(err) - } - - f, ok := r.(interface { - files.File - io.ReaderAt - }) - if !ok { - t.Skip("ReaderAt not implemented") - } - - orig := make([]byte, dataSize) - if _, err := io.ReadFull(f, orig); err != nil { - t.Fatal(err) - } - f.Close() - - origR := bytes.NewReader(orig) - - if _, err := api.Unixfs().Get(ctx, p); err != nil { - t.Fatal(err) - } - - test := func(offset int64, read int, expect int64, shouldEof bool) { - t.Run(fmt.Sprintf("readat%d-r%d-%d", offset, read, expect), func(t *testing.T) { - origBuf := make([]byte, read) - origRead, err := origR.ReadAt(origBuf, offset) - if err != nil && err != io.EOF { - t.Fatalf("orig: %s", err) - } - buf := make([]byte, read) - r, err := f.ReadAt(buf, offset) - if shouldEof { - if err != io.EOF { - t.Fatal("expected EOF, got: ", err) - } - } else if err != nil { - t.Fatal("got: ", err) - } - - if int64(r) != expect { - t.Fatal("read wrong amount of data") - } - if r != origRead { - t.Fatal("read different amount of data than bytes.Reader") - } - if !bytes.Equal(buf, origBuf) { - fmt.Fprintf(os.Stderr, "original:\n%s\n", hex.Dump(origBuf)) - fmt.Fprintf(os.Stderr, "got:\n%s\n", hex.Dump(buf)) - t.Fatal("data didn't match") - } - 
}) - } - - test(3, 10, 10, false) - test(13, 10, 10, false) - test(513, 10, 10, false) - test(350, 100, 100, false) - test(0, int(dataSize), dataSize, false) - test(dataSize-50, 100, 50, true) -} diff --git a/coreiface/unixfs.go b/coreiface/unixfs.go deleted file mode 100644 index 606bc8e78..000000000 --- a/coreiface/unixfs.go +++ /dev/null @@ -1,80 +0,0 @@ -package iface - -import ( - "context" - - "github.com/ipfs/boxo/coreiface/options" - path "github.com/ipfs/boxo/coreiface/path" - - "github.com/ipfs/boxo/files" - "github.com/ipfs/go-cid" -) - -type AddEvent struct { - Name string - Path path.Resolved `json:",omitempty"` - Bytes int64 `json:",omitempty"` - Size string `json:",omitempty"` -} - -// FileType is an enum of possible UnixFS file types. -type FileType int32 - -const ( - // TUnknown means the file type isn't known (e.g., it hasn't been - // resolved). - TUnknown FileType = iota - // TFile is a regular file. - TFile - // TDirectory is a directory. - TDirectory - // TSymlink is a symlink. - TSymlink -) - -func (t FileType) String() string { - switch t { - case TUnknown: - return "unknown" - case TFile: - return "file" - case TDirectory: - return "directory" - case TSymlink: - return "symlink" - default: - return "" - } -} - -// DirEntry is a directory entry returned by `Ls`. -type DirEntry struct { - Name string - Cid cid.Cid - - // Only filled when asked to resolve the directory entry. - Size uint64 // The size of the file in bytes (or the size of the symlink). - Type FileType // The type of the file. - Target string // The symlink target (if a symlink). - - Err error -} - -// UnixfsAPI is the basic interface to immutable files in IPFS -// NOTE: This API is heavily WIP, things are guaranteed to break frequently -type UnixfsAPI interface { - // Add imports the data from the reader into merkledag file - // - // TODO: a long useful comment on how to use this for many different scenarios - Add(context.Context, files.Node, ...options.UnixfsAddOption) (path.Resolved, error) - - // Get returns a read-only handle to a file tree referenced by a path - // - // Note that some implementations of this API may apply the specified context - // to operations performed on the returned file - Get(context.Context, path.Path) (files.Node, error) - - // Ls returns the list of links in a directory. Links aren't guaranteed to be - // returned in order - Ls(context.Context, path.Path, ...options.UnixfsLsOption) (<-chan DirEntry, error) -} diff --git a/coreiface/util.go b/coreiface/util.go deleted file mode 100644 index 6d58bf40d..000000000 --- a/coreiface/util.go +++ /dev/null @@ -1,20 +0,0 @@ -package iface - -import ( - "context" - "io" -) - -type Reader interface { - ReadSeekCloser - Size() uint64 - CtxReadFull(context.Context, []byte) (int, error) -} - -// A ReadSeekCloser implements interfaces to read, copy, seek and close. -type ReadSeekCloser interface { - io.Reader - io.Seeker - io.Closer - io.WriterTo -} diff --git a/examples/README.md b/examples/README.md index 438766e4c..fa5408732 100644 --- a/examples/README.md +++ b/examples/README.md @@ -1,11 +1,32 @@ -# boxo examples and tutorials +# Boxo Examples and Tutorials -In this folder, you can find some examples to help you get started using boxo and its associated libraries in your applications. +In this directory, you can find some examples to help you get started using Boxo and its associated libraries in your applications. 
-Let us know if you find any issue or if you want to contribute and add a new tutorial, feel welcome to submit a pr, thank you! +Let us know if you find any issues. If you want to contribute and add a new tutorial, feel free to submit a PR. Thank you! + +## How To Use the Examples + +The examples are designed to give users a starting point to create certain things using Boxo. However, this directory is not meant to be directly copied out of Boxo, since it uses a replacement directive in the [`go.mod`](go.mod) file. You also cannot `go install` the examples directly. + +If you want to copy one of the examples out of Boxo in order to use it in your own product, you need to remove the replacement directive and ensure you're using the latest Boxo version: + +```bash +> go mod edit -dropreplace=github.com/ipfs/boxo +> go get github.com/ipfs/boxo@latest +> go mod tidy +``` + +## How To Create an Example + +All examples are self-contained inside the same module ([`examples`](go.mod)). To create an example, clone Boxo, navigate to this directory and create a sub-directory with a descriptive name for the example. If the example pertains to a topic that has multiple examples, such as `gateway`, create a sub-directory there. + +The new example must contain a descriptive `README.md` file, which explains what the example is and how to build and use it. See the existing examples to get an idea of how extensive it should be. In addition, your code must be properly documented. + +Once you have your example finished, do not forget to run `go mod tidy` and to add a link to the example in the section [Examples and Tutorials](#examples-and-tutorials) below. ## Examples and Tutorials - [Fetching a UnixFS file by CID](./unixfs-file-cid) - [Gateway backed by a CAR file](./gateway/car) - [Gateway backed by a remote blockstore and IPNS resolver](./gateway/proxy) +- [Delegated Routing V1 Command Line Client](./routing/delegated-routing-client/) diff --git a/examples/go.mod b/examples/go.mod index 30d639ff3..3977fe325 100644 --- a/examples/go.mod +++ b/examples/go.mod @@ -9,9 +9,9 @@ require ( github.com/ipfs/go-datastore v0.6.1-0.20230901172804-1caa2449ed7c github.com/ipld/go-car/v2 v2.10.2-0.20230622090957-499d0c909d33 github.com/ipld/go-ipld-prime v0.21.0 - github.com/libp2p/go-libp2p v0.30.0 + github.com/libp2p/go-libp2p v0.32.0 github.com/libp2p/go-libp2p-routing-helpers v0.7.0 - github.com/multiformats/go-multiaddr v0.11.0 + github.com/multiformats/go-multiaddr v0.12.0 github.com/multiformats/go-multicodec v0.9.0 github.com/prometheus/client_golang v1.16.0 github.com/stretchr/testify v1.8.4 @@ -46,10 +46,9 @@ require ( github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/mock v1.6.0 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/gopacket v1.1.19 // indirect - github.com/google/pprof v0.0.0-20230821062121-407c9e7a662f // indirect + github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b // indirect github.com/google/uuid v1.3.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect @@ -57,7 +56,7 @@ require ( github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/hashicorp/golang-lru/v2 v2.0.5 // indirect - github.com/huin/goupnp v1.2.0 // indirect + github.com/huin/goupnp v1.3.0 // indirect github.com/ipfs/bbloom v0.0.4 // indirect
github.com/ipfs/go-bitfield v1.1.0 // indirect github.com/ipfs/go-ipfs-delay v0.0.1 // indirect @@ -76,7 +75,7 @@ require ( github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect github.com/jbenet/goprocess v0.1.4 // indirect - github.com/klauspost/compress v1.16.7 // indirect + github.com/klauspost/compress v1.17.2 // indirect github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/koron/go-ssdp v0.0.4 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect @@ -93,9 +92,9 @@ require ( github.com/libp2p/go-reuseport v0.4.0 // indirect github.com/libp2p/go-yamux/v4 v4.0.1 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect - github.com/mattn/go-isatty v0.0.19 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect - github.com/miekg/dns v1.1.55 // indirect + github.com/miekg/dns v1.1.56 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect github.com/minio/sha256-simd v1.0.1 // indirect @@ -106,9 +105,9 @@ require ( github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect github.com/multiformats/go-multibase v0.2.0 // indirect github.com/multiformats/go-multihash v0.2.3 // indirect - github.com/multiformats/go-multistream v0.4.1 // indirect + github.com/multiformats/go-multistream v0.5.0 // indirect github.com/multiformats/go-varint v0.0.7 // indirect - github.com/onsi/ginkgo/v2 v2.11.0 // indirect + github.com/onsi/ginkgo/v2 v2.13.0 // indirect github.com/opencontainers/runtime-spec v1.1.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/openzipkin/zipkin-go v0.4.2 // indirect @@ -121,10 +120,11 @@ require ( github.com/prometheus/common v0.44.0 // indirect github.com/prometheus/procfs v0.11.1 // indirect github.com/quic-go/qpack v0.4.0 // indirect - github.com/quic-go/qtls-go1-20 v0.3.2 // indirect - github.com/quic-go/quic-go v0.38.0 // indirect - github.com/quic-go/webtransport-go v0.5.3 // indirect + github.com/quic-go/qtls-go1-20 v0.3.4 // indirect + github.com/quic-go/quic-go v0.39.3 // indirect + github.com/quic-go/webtransport-go v0.6.0 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect + github.com/samber/lo v1.36.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb // indirect github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc // indirect @@ -146,18 +146,19 @@ require ( go.opentelemetry.io/otel/metric v1.19.0 // indirect go.opentelemetry.io/otel/trace v1.19.0 // indirect go.opentelemetry.io/proto/otlp v1.0.0 // indirect - go.uber.org/dig v1.17.0 // indirect - go.uber.org/fx v1.20.0 // indirect + go.uber.org/dig v1.17.1 // indirect + go.uber.org/fx v1.20.1 // indirect + go.uber.org/mock v0.3.0 // indirect go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.25.0 // indirect + go.uber.org/zap v1.26.0 // indirect golang.org/x/crypto v0.14.0 // indirect - golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 // indirect - golang.org/x/mod v0.12.0 // indirect + golang.org/x/exp v0.0.0-20231006140011-7918f672742d // indirect + golang.org/x/mod v0.13.0 // indirect golang.org/x/net v0.17.0 // indirect - golang.org/x/sync v0.3.0 // indirect + golang.org/x/sync v0.4.0 // indirect golang.org/x/sys v0.13.0 // indirect golang.org/x/text v0.13.0 // indirect - golang.org/x/tools 
v0.12.1-0.20230815132531-74c255bcf846 // indirect + golang.org/x/tools v0.14.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect gonum.org/v1/gonum v0.13.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 // indirect diff --git a/examples/go.sum b/examples/go.sum index 286f280a5..c9395dd2d 100644 --- a/examples/go.sum +++ b/examples/go.sum @@ -98,8 +98,6 @@ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -129,8 +127,8 @@ github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20230821062121-407c9e7a662f h1:pDhu5sgp8yJlEF/g6osliIIpF9K4F5jvkULXa4daRDQ= -github.com/google/pprof v0.0.0-20230821062121-407c9e7a662f/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b h1:RMpPgZTSApbPf7xaVel+QkoGPRLFLrwFO89uDUHEGf0= +github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= @@ -139,6 +137,7 @@ github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -156,8 +155,8 @@ github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+l github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4= github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= -github.com/huin/goupnp v1.2.0 
h1:uOKW26NG1hsSSbXIZ1IR7XP9Gjd1U8pnLaCMgntmkmY= -github.com/huin/goupnp v1.2.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= +github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= +github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA= @@ -234,8 +233,8 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= -github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= +github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= @@ -255,8 +254,8 @@ github.com/libp2p/go-doh-resolver v0.4.0 h1:gUBa1f1XsPwtpE1du0O+nnZCUqtG7oYi7Bb+ github.com/libp2p/go-doh-resolver v0.4.0/go.mod h1:v1/jwsFusgsWIGX/c6vCRrnJ60x7bhTiq/fs2qt0cAg= github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= -github.com/libp2p/go-libp2p v0.30.0 h1:9EZwFtJPFBcs/yJTnP90TpN1hgrT/EsFfM+OZuwV87U= -github.com/libp2p/go-libp2p v0.30.0/go.mod h1:nr2g5V7lfftwgiJ78/HrID+pwvayLyqKCEirT2Y3Byg= +github.com/libp2p/go-libp2p v0.32.0 h1:86I4B7nBUPIyTgw3+5Ibq6K7DdKRCuZw8URCfPc1hQM= +github.com/libp2p/go-libp2p v0.32.0/go.mod h1:hXXC3kXPlBZ1eu8Q2hptGrMB4mZ3048JUoS4EKaHW5c= github.com/libp2p/go-libp2p-asn-util v0.3.0 h1:gMDcMyYiZKkocGXDQ5nsUQyquC9+H+iLEQHwOCZ7s8s= github.com/libp2p/go-libp2p-asn-util v0.3.0/go.mod h1:B1mcOrKUE35Xq/ASTmQ4tN3LNzVVaMNmq2NACuqyB9w= github.com/libp2p/go-libp2p-kad-dht v0.23.0 h1:sxE6LxLopp79eLeV695n7+c77V/Vn4AMF28AdM/XFqM= @@ -283,15 +282,15 @@ github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= 
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo= -github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= +github.com/miekg/dns v1.1.56 h1:5imZaSeoRNvpM9SzWNhEcP9QliKiz20/dA2QabIGVnE= +github.com/miekg/dns v1.1.56/go.mod h1:cRm6Oo2C8TY9ZS/TqsSrseAcncm74lfK5G+ikN2SWWY= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= @@ -318,8 +317,8 @@ github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9 github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= -github.com/multiformats/go-multiaddr v0.11.0 h1:XqGyJ8ufbCE0HmTDwx2kPdsrQ36AGPZNZX6s6xfJH10= -github.com/multiformats/go-multiaddr v0.11.0/go.mod h1:gWUm0QLR4thQ6+ZF6SXUw8YjtwQSPapICM+NmCkxHSM= +github.com/multiformats/go-multiaddr v0.12.0 h1:1QlibTFkoXJuDjjYsMHhE73TnzJQl8FSWatk/0gxGzE= +github.com/multiformats/go-multiaddr v0.12.0/go.mod h1:WmZXgObOQOYp9r3cslLlppkrz1FYSHmE834dfz/lWu8= github.com/multiformats/go-multiaddr-dns v0.3.0/go.mod h1:mNzQ4eTGDg0ll1N9jKPOUogZPoJ30W8a7zk66FQPpdQ= github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= @@ -337,16 +336,16 @@ github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpK github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= -github.com/multiformats/go-multistream v0.4.1 h1:rFy0Iiyn3YT0asivDUIR05leAdwZq3de4741sbiSdfo= -github.com/multiformats/go-multistream v0.4.1/go.mod h1:Mz5eykRVAjJWckE2U78c6xqdtyNUEhKSM0Lwar2p77Q= +github.com/multiformats/go-multistream v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf/QGkvOGQAyiE= +github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA= github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= -github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= -github.com/onsi/ginkgo/v2 
v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg= @@ -383,18 +382,20 @@ github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwa github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= -github.com/quic-go/qtls-go1-20 v0.3.2 h1:rRgN3WfnKbyik4dBV8A6girlJVxGand/d+jVKbQq5GI= -github.com/quic-go/qtls-go1-20 v0.3.2/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k= -github.com/quic-go/quic-go v0.38.0 h1:T45lASr5q/TrVwt+jrVccmqHhPL2XuSyoCLVCpfOSLc= -github.com/quic-go/quic-go v0.38.0/go.mod h1:MPCuRq7KBK2hNcfKj/1iD1BGuN3eAYMeNxp3T42LRUg= -github.com/quic-go/webtransport-go v0.5.3 h1:5XMlzemqB4qmOlgIus5zB45AcZ2kCgCy2EptUrfOPWU= -github.com/quic-go/webtransport-go v0.5.3/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= +github.com/quic-go/qtls-go1-20 v0.3.4 h1:MfFAPULvst4yoMgY9QmtpYmfij/em7O8UUi+bNVm7Cg= +github.com/quic-go/qtls-go1-20 v0.3.4/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k= +github.com/quic-go/quic-go v0.39.3 h1:o3YB6t2SR+HU/pgwF29kJ6g4jJIJEwEZ8CKia1h1TKg= +github.com/quic-go/quic-go v0.39.3/go.mod h1:T09QsDQWjLiQ74ZmacDfqZmhY/NLnw5BC40MANNNZ1Q= +github.com/quic-go/webtransport-go v0.6.0 h1:CvNsKqc4W2HljHJnoT+rMmbRJybShZ0YPFDD3NxaZLY= +github.com/quic-go/webtransport-go v0.6.0/go.mod h1:9KjU4AEBqEQidGHNDkZrb8CAa1abRaosM2yGOyiikEc= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/samber/lo v1.36.0 h1:4LaOxH1mHnbDGhTVE0i1z8v/lWaQW8AIfOD3HU4mSaw= +github.com/samber/lo v1.36.0/go.mod h1:HLeWcJRRyLKp3+/XBJvOrerCQn9mhdKMHyd7IRlgeQ8= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= @@ -445,6 +446,7 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/thoas/go-funk v0.9.1 h1:O549iLZqPpTUQ10ykd26sZhzD+rmR5pWhuElrhbC20M= github.com/tj/assert v0.0.3 h1:Df/BlaZ20mq6kuai7f5z2TvPFiwC3xaWJSDQNiIS3Rk= 
github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb h1:Ywfo8sUltxogBpFuMOFRrrSifO788kAFxmvVw31PtQQ= github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb/go.mod h1:ikPs9bRWicNw3S7XpJ8sK/smGwU9WcSVU3dy9qahYBM= @@ -510,12 +512,14 @@ go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v8 go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= -go.uber.org/dig v1.17.0 h1:5Chju+tUvcC+N7N6EV08BJz41UZuO3BmHcN4A287ZLI= -go.uber.org/dig v1.17.0/go.mod h1:rTxpf7l5I0eBTlE6/9RL+lDybC7WFwY2QH55ZSjy1mU= -go.uber.org/fx v1.20.0 h1:ZMC/pnRvhsthOZh9MZjMq5U8Or3mA9zBSPaLnzs3ihQ= -go.uber.org/fx v1.20.0/go.mod h1:qCUj0btiR3/JnanEr1TYEePfSw6o/4qYJscgvzQ5Ub0= +go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= +go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.20.1 h1:zVwVQGS8zYvhh9Xxcu4w1M6ESyeMzebzj2NbSayZ4Mk= +go.uber.org/fx v1.20.1/go.mod h1:iSYNbHf2y55acNCwCXKx7LbWb5WG1Bnue5RDXz1OREg= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/mock v0.3.0 h1:3mUxI1No2/60yUYax92Pt8eNOEecx2D3lcXZh2NEZJo= +go.uber.org/mock v0.3.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -523,8 +527,8 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8 go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= -go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c= -go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -540,8 +544,8 @@ golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ= -golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= golang.org/x/lint 
v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -553,8 +557,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= -golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= +golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -588,8 +592,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -639,10 +643,9 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 h1:Vve/L0v7CXXuxUmaMGIEK/dEeq7uiqb5qBgQrZzIE7E= -golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= +golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= +golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/examples/routing/delegated-routing-client/.gitignore b/examples/routing/delegated-routing-client/.gitignore new file mode 100644 index 000000000..2555457e2 --- /dev/null +++ b/examples/routing/delegated-routing-client/.gitignore @@ -0,0 +1 @@ +delegated-routing-client \ No newline at end of file diff --git a/examples/routing/delegated-routing-client/README.md b/examples/routing/delegated-routing-client/README.md new file mode 100644 index 000000000..3e34e4531 --- /dev/null +++ b/examples/routing/delegated-routing-client/README.md @@ -0,0 +1,71 @@ +# Delegated Routing V1 Command Line Client + +This is an example of how to use the Delegated Routing V1 HTTP client from Boxo. +In this package, we build a small command line tool that allows you to connect to +a Routing V1 endpoint and fetch content providers, peer information, and +IPNS records for a given IPNS name. + +## Build + +```bash +> go build -o delegated-routing-client +``` + +## Usage + +First, you will need an HTTP endpoint compatible with the [Delegated Routing V1 Specification][Specification]. +For that, you can use [Kubo], which supports [exposing][kubo-conf] +a `/routing/v1` endpoint. For the commands below, we assume the HTTP server that +provides the endpoint is `http://127.0.0.1:8080`. + +### Find CID Providers + +To find providers, provide the flag `-cid` with the [CID] of the content you're looking for: + +```console +$ ./delegated-routing-client -e http://127.0.0.1:8080 -cid bafkreifjjcie6lypi6ny7amxnfftagclbuxndqonfipmb64f2km2devei4 + +12D3KooWEfL19QqRGGLraaAYw1XA3dtDdVRYaHt6jymFxcuQo3Zm + Protocols: [] + Addresses: [/ip4/163.47.51.218/tcp/28131] +12D3KooWK53GAx2g2UUYfJHHjxDbVLeDgGxNMHXDWeJa5KgMhTD2 + Protocols: [] + Addresses: [/ip4/195.167.147.43/udp/8888/quic /ip4/195.167.147.43/tcp/8888] +12D3KooWCpr8kACTRLKrPy4LPpSX7LXvKQ7eYqTmY8CBvgK5HZgB + Protocols: [] + Addresses: [/ip4/163.47.49.234/tcp/28102] +12D3KooWC9L4RjPGgqpzBUBkcVpKjJYofCkC5i5QdQftg1LdsFb2 + Protocols: [] + Addresses: [/ip4/198.244.201.187/tcp/4001] +``` + +### Find Peer Information + +To find a peer, provide the flag `-peer` with the [Peer ID] of the peer you're looking for: + + +```console +$ ./delegated-routing-client -e http://127.0.0.1:8080 -peer 12D3KooWC9L4RjPGgqpzBUBkcVpKjJYofCkC5i5QdQftg1LdsFb2 + +12D3KooWC9L4RjPGgqpzBUBkcVpKjJYofCkC5i5QdQftg1LdsFb2 + Protocols: [] + Addresses: [/ip4/198.244.201.187/tcp/4001] +``` + +### Get an IPNS Record + +To find an IPNS record, provide the flag `-ipns` with the [IPNS Name] you're trying to find a record for: + +```console +$ ./delegated-routing-client -e http://127.0.0.1:8080 -ipns /ipns/k51qzi5uqu5diuz0h5tjqama8qbmyxusvqz2hfgn5go5l07l9k2ubqa09m7toe + +/ipns/k51qzi5uqu5diuz0h5tjqama8qbmyxusvqz2hfgn5go5l07l9k2ubqa09m7toe + Value: /ipfs/QmUGMoVz62ZARyxkrdEiwmFZanTwVWLLu6EAWvbWHNcwR8 +``` + +[Specification]: https://specs.ipfs.tech/routing/http-routing-v1/ +[Kubo]: https://github.com/ipfs/kubo +[kubo-conf]: https://github.com/ipfs/kubo/blob/master/docs/config.md#gatewayexposeroutingapi +[CID]: https://docs.ipfs.tech/concepts/content-addressing/#what-is-a-cid +[Peer ID]: https://docs.libp2p.io/concepts/fundamentals/peers/#peer-id +[IPNS Name]: https://specs.ipfs.tech/ipns/ipns-record/#ipns-name diff --git a/examples/routing/delegated-routing-client/main.go b/examples/routing/delegated-routing-client/main.go new file mode 100644 index 000000000..8fac342ac --- /dev/null +++ 
b/examples/routing/delegated-routing-client/main.go @@ -0,0 +1,170 @@ +package main + +import ( + "context" + "errors" + "flag" + "fmt" + "io" + "log" + "os" + "time" + + "github.com/ipfs/boxo/ipns" + "github.com/ipfs/boxo/routing/http/client" + "github.com/ipfs/boxo/routing/http/types" + "github.com/ipfs/boxo/routing/http/types/iter" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/peer" +) + +func main() { + gatewayUrlPtr := flag.String("e", "", "routing v1 endpoint to use") + timeoutPtr := flag.Int("t", 10, "timeout in seconds for lookup") + cidPtr := flag.String("cid", "", "cid to find") + pidPtr := flag.String("peer", "", "peer to find") + namePtr := flag.String("ipns", "", "ipns name to retrieve record for") + flag.Parse() + + if err := run(os.Stdout, *gatewayUrlPtr, *cidPtr, *pidPtr, *namePtr, *timeoutPtr); err != nil { + log.Fatal(err) + } +} + +func run(w io.Writer, gatewayURL, cidStr, pidStr, nameStr string, timeoutSeconds int) error { + // Creates a new Delegated Routing V1 client. + client, err := client.New(gatewayURL) + if err != nil { + return err + } + + timeout := time.Duration(timeoutSeconds) * time.Second + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + if cidStr != "" { + return findProviders(w, ctx, client, cidStr) + } else if pidStr != "" { + return findPeers(w, ctx, client, pidStr) + } else if nameStr != "" { + return findIPNS(w, ctx, client, nameStr) + } else { + return errors.New("cid or peer must be provided") + } +} + +func findProviders(w io.Writer, ctx context.Context, client *client.Client, cidStr string) error { + // Parses the given CID to lookup the providers for. + contentCid, err := cid.Parse(cidStr) + if err != nil { + return err + } + + // Ask for providers providing the given content CID. + recordsIter, err := client.FindProviders(ctx, contentCid) + if err != nil { + return err + } + defer recordsIter.Close() + return printIter(w, recordsIter) +} + +func printIter(w io.Writer, iter iter.ResultIter[types.Record]) error { + // The response is streamed. Alternatively, you could use [iter.ReadAll] + // to fetch all the results all at once, instead of iterating as they are + // streamed. + for iter.Next() { + res := iter.Val() + + // Check for error, but do not complain if we exceeded the timeout. We are + // expecting that to happen: we explicitly defined a timeout. + if res.Err != nil { + if !errors.Is(res.Err, context.DeadlineExceeded) { + return res.Err + } + + return nil + } + + switch res.Val.GetSchema() { + case types.SchemaPeer: + record := res.Val.(*types.PeerRecord) + fmt.Fprintln(w, record.ID) + fmt.Fprintln(w, "\tProtocols:", record.Protocols) + fmt.Fprintln(w, "\tAddresses:", record.Addrs) + default: + // You may not want to fail here, it's up to you. You can just handle + // the schemas you want, or that you know, but not fail. + log.Printf("unrecognized schema: %s", res.Val.GetSchema()) + } + } + + return nil +} + +func findPeers(w io.Writer, ctx context.Context, client *client.Client, pidStr string) error { + // Parses the given Peer ID to lookup the information for. + pid, err := peer.Decode(pidStr) + if err != nil { + return err + } + + // Ask for information about the peer with the given peer ID. + recordsIter, err := client.FindPeers(ctx, pid) + if err != nil { + return err + } + defer recordsIter.Close() + + // The response is streamed. Alternatively, you could use [iter.ReadAll] + // to fetch all the results all at once, instead of iterating as they are + // streamed. 
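As the comment above notes, the streamed iterator can also be drained eagerly. Below is a minimal sketch of that alternative, relying only on the `Next`/`Val`/`Close` methods and the `Err`/`Val` result fields already used in this file; the helper name is hypothetical and roughly mirrors what `iter.ReadAll` is documented to do.

```go
// drainResults collects every streamed result into a slice before processing.
// A timeout ends the stream early instead of being treated as a failure,
// matching the loops in this file. Hypothetical helper, not part of the client API.
func drainResults[T any](it iter.ResultIter[T]) ([]T, error) {
	defer it.Close()
	var out []T
	for it.Next() {
		res := it.Val()
		if res.Err != nil {
			if errors.Is(res.Err, context.DeadlineExceeded) {
				return out, nil
			}
			return out, res.Err
		}
		out = append(out, res.Val)
	}
	return out, nil
}
```

With such a helper, `findPeers` could collect the records first and print them afterwards, at the cost of buffering the whole response in memory.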
+ for recordsIter.Next() { + res := recordsIter.Val() + + // Check for error, but do not complain if we exceeded the timeout. We are + // expecting that to happen: we explicitly defined a timeout. + if res.Err != nil { + if !errors.Is(res.Err, context.DeadlineExceeded) { + return res.Err + } + + return nil + } + + fmt.Fprintln(w, res.Val.ID) + fmt.Fprintln(w, "\tProtocols:", res.Val.Protocols) + fmt.Fprintln(w, "\tAddresses:", res.Val.Addrs) + } + + return nil +} + +func findIPNS(w io.Writer, ctx context.Context, client *client.Client, nameStr string) error { + // Parses the given name string to get a record for. + name, err := ipns.NameFromString(nameStr) + if err != nil { + return err + } + + // Fetch an IPNS record for the given name. [client.Client.GetIPNS] verifies + // if the retrieved record is valid against the given name, and errors otherwise. + record, err := client.GetIPNS(ctx, name) + if err != nil { + return err + } + + fmt.Fprintf(w, "/ipns/%s\n", name) + v, err := record.Value() + if err != nil { + return err + } + + // Since [client.Client.GetIPNS] verifies if the retrieved record is valid, we + // do not need to verify it again. However, if you were not using this specific + // client, but using some other tool, you should always validate the IPNS Record + // using the [ipns.Validate] or [ipns.ValidateWithName] functions. + fmt.Fprintln(w, "\tSignature: VALID") + fmt.Fprintln(w, "\tValue:", v.String()) + return nil +} diff --git a/examples/routing/delegated-routing-client/main_test.go b/examples/routing/delegated-routing-client/main_test.go new file mode 100644 index 000000000..2dab7b13a --- /dev/null +++ b/examples/routing/delegated-routing-client/main_test.go @@ -0,0 +1,95 @@ +package main + +import ( + "bytes" + "crypto/rand" + "fmt" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/ipfs/boxo/ipns" + "github.com/ipfs/boxo/path" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFindProviders(t *testing.T) { + cidStr := "bafkreifjjcie6lypi6ny7amxnfftagclbuxndqonfipmb64f2km2devei4" + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/routing/v1/providers/"+cidStr { + w.Header().Set("Content-Type", "application/x-ndjson") + w.Write([]byte(`{"Schema":"peer","ID":"12D3KooWM8sovaEGU1bmiWGWAzvs47DEcXKZZTuJnpQyVTkRs2Vn","Addrs":["/ip4/111.222.222.111/tcp/5734"],"Protocols":["transport-bitswap"]}` + "\n")) + w.Write([]byte(`{"Schema":"peer","ID":"12D3KooWB6RAWgcmHAP7TGEGK7utV2ZuqSzX1DNjRa97TtJ7139n","Addrs":["/ip4/127.0.0.1/tcp/5734"],"Protocols":["transport-horse"]}` + "\n")) + } + })) + t.Cleanup(ts.Close) + + out := &bytes.Buffer{} + err := run(out, ts.URL, cidStr, "", "", 1) + assert.Contains(t, out.String(), "12D3KooWM8sovaEGU1bmiWGWAzvs47DEcXKZZTuJnpQyVTkRs2Vn\n\tProtocols: [transport-bitswap]\n\tAddresses: [/ip4/111.222.222.111/tcp/5734]\n") + assert.Contains(t, out.String(), "12D3KooWB6RAWgcmHAP7TGEGK7utV2ZuqSzX1DNjRa97TtJ7139n\n\tProtocols: [transport-horse]\n\tAddresses: [/ip4/127.0.0.1/tcp/5734]\n") + assert.NoError(t, err) +} + +func TestFindPeers(t *testing.T) { + pidStr := "bafzaajaiaejcbkboq2tin6dkdc2vinbbn2dgowzn3u5izpjwxejheogw23scafkz" + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/routing/v1/peers/"+pidStr { + w.Header().Set("Content-Type", 
"application/x-ndjson") + w.Write([]byte(`{"Schema":"peer","ID":"12D3KooWM8sovaEGU1bmiWGWAzvs47DEcXKZZTuJnpQyVTkRs2Vn","Addrs":["/ip4/111.222.222.111/tcp/5734"],"Protocols":["transport-bitswap"]}` + "\n")) + } + })) + t.Cleanup(ts.Close) + + out := &bytes.Buffer{} + err := run(out, ts.URL, "", pidStr, "", 1) + assert.Contains(t, out.String(), "12D3KooWM8sovaEGU1bmiWGWAzvs47DEcXKZZTuJnpQyVTkRs2Vn\n\tProtocols: [transport-bitswap]\n\tAddresses: [/ip4/111.222.222.111/tcp/5734]\n") + assert.NoError(t, err) +} + +func TestGetIPNS(t *testing.T) { + name, rec := makeNameAndRecord(t) + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/routing/v1/ipns/"+name.String() { + w.Header().Set("Content-Type", "application/vnd.ipfs.ipns-record") + w.Write(rec) + } + })) + t.Cleanup(ts.Close) + + out := &bytes.Buffer{} + err := run(out, ts.URL, "", "", name.String(), 1) + assert.Contains(t, out.String(), fmt.Sprintf("/ipns/%s\n\tSignature: VALID\n\tValue: /ipfs/bafkreifjjcie6lypi6ny7amxnfftagclbuxndqonfipmb64f2km2devei4\n", name.String())) + assert.NoError(t, err) +} + +func makeNameAndRecord(t *testing.T) (ipns.Name, []byte) { + sk, _, err := crypto.GenerateEd25519Key(rand.Reader) + require.NoError(t, err) + + pid, err := peer.IDFromPrivateKey(sk) + require.NoError(t, err) + + cid, err := cid.Decode("bafkreifjjcie6lypi6ny7amxnfftagclbuxndqonfipmb64f2km2devei4") + require.NoError(t, err) + + path := path.FromCid(cid) + eol := time.Now().Add(time.Hour * 48) + ttl := time.Second * 20 + + record, err := ipns.NewRecord(sk, path, 1, eol, ttl) + require.NoError(t, err) + + rawRecord, err := ipns.MarshalRecord(record) + require.NoError(t, err) + + return ipns.NameFromPeer(pid), rawRecord +} diff --git a/gateway/assets/assets.go b/gateway/assets/assets.go index 3c0265f0c..4a629c366 100644 --- a/gateway/assets/assets.go +++ b/gateway/assets/assets.go @@ -10,8 +10,7 @@ import ( "strings" "github.com/cespare/xxhash/v2" - - ipfspath "github.com/ipfs/boxo/path" + "github.com/ipfs/boxo/path" ) //go:embed *.html *.css @@ -132,7 +131,7 @@ type Breadcrumb struct { func Breadcrumbs(urlPath string, dnslinkOrigin bool) []Breadcrumb { var ret []Breadcrumb - p, err := ipfspath.ParsePath(urlPath) + p, err := path.NewPath(urlPath) if err != nil { // No assets.Breadcrumbs, fallback to bare Path in template return ret diff --git a/gateway/assets/test/main.go b/gateway/assets/test/main.go index c38c72057..c03074dab 100644 --- a/gateway/assets/test/main.go +++ b/gateway/assets/test/main.go @@ -159,8 +159,7 @@ func runTemplate(w http.ResponseWriter, filename string, data interface{}) { } err = tpl.Execute(w, data) if err != nil { - http.Error(w, fmt.Sprintf("failed to execute template: %s", err), http.StatusInternalServerError) - return + _, _ = w.Write([]byte(fmt.Sprintf("error during body generation: %v", err))) } } diff --git a/gateway/blocks_backend.go b/gateway/blocks_backend.go index 208c92062..a87b05c6b 100644 --- a/gateway/blocks_backend.go +++ b/gateway/blocks_backend.go @@ -7,22 +7,20 @@ import ( "fmt" "io" "net/http" - gopath "path" "strings" + "time" "github.com/ipfs/boxo/blockservice" blockstore "github.com/ipfs/boxo/blockstore" - nsopts "github.com/ipfs/boxo/coreiface/options/namesys" - ifacepath "github.com/ipfs/boxo/coreiface/path" "github.com/ipfs/boxo/fetcher" bsfetcher "github.com/ipfs/boxo/fetcher/impl/blockservice" "github.com/ipfs/boxo/files" "github.com/ipfs/boxo/ipld/merkledag" ufile "github.com/ipfs/boxo/ipld/unixfs/file" uio 
"github.com/ipfs/boxo/ipld/unixfs/io" + "github.com/ipfs/boxo/ipns" "github.com/ipfs/boxo/namesys" - "github.com/ipfs/boxo/namesys/resolve" - ipfspath "github.com/ipfs/boxo/path" + "github.com/ipfs/boxo/path" "github.com/ipfs/boxo/path/resolver" blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" @@ -41,7 +39,6 @@ import ( "github.com/ipld/go-ipld-prime/traversal/selector" selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse" routinghelpers "github.com/libp2p/go-libp2p-routing-helpers" - "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/routing" mc "github.com/multiformats/go-multicodec" @@ -69,6 +66,7 @@ var _ IPFSBackend = (*BlocksBackend)(nil) type blocksBackendOptions struct { ns namesys.NameSystem vs routing.ValueStore + r resolver.Resolver } // WithNameSystem sets the name system to use with the [BlocksBackend]. If not set @@ -89,6 +87,14 @@ func WithValueStore(vs routing.ValueStore) BlocksBackendOption { } } +// WithResolver sets the [resolver.Resolver] to use with the [BlocksBackend]. +func WithResolver(r resolver.Resolver) BlocksBackendOption { + return func(opts *blocksBackendOptions) error { + opts.r = r + return nil + } +} + type BlocksBackendOption func(options *blocksBackendOptions) error func NewBlocksBackend(blockService blockservice.BlockService, opts ...BlocksBackendOption) (*BlocksBackend, error) { @@ -102,21 +108,11 @@ func NewBlocksBackend(blockService blockservice.BlockService, opts ...BlocksBack // Setup the DAG services, which use the CAR block store. dagService := merkledag.NewDAGService(blockService) - // Setup the UnixFS resolver. - fetcherConfig := bsfetcher.NewFetcherConfig(blockService) - fetcherConfig.PrototypeChooser = dagpb.AddSupportToChooser(func(lnk ipld.Link, lnkCtx ipld.LinkContext) (ipld.NodePrototype, error) { - if tlnkNd, ok := lnkCtx.LinkNode.(schema.TypedLinkNode); ok { - return tlnkNd.LinkTargetNodePrototype(), nil - } - return basicnode.Prototype.Any, nil - }) - fetcher := fetcherConfig.WithReifier(unixfsnode.Reify) - r := resolver.NewBasicResolver(fetcher) - // Setup a name system so that we are able to resolve /ipns links. var ( ns namesys.NameSystem vs routing.ValueStore + r resolver.Resolver ) vs = compiledOptions.vs @@ -137,6 +133,15 @@ func NewBlocksBackend(blockService blockservice.BlockService, opts ...BlocksBack } } + r = compiledOptions.r + if r == nil { + // Setup the UnixFS resolver. + fetcherCfg := bsfetcher.NewFetcherConfig(blockService) + fetcherCfg.PrototypeChooser = dagpb.AddSupportToChooser(bsfetcher.DefaultPrototypeChooser) + fetcher := fetcherCfg.WithReifier(unixfsnode.Reify) + r = resolver.NewBasicResolver(fetcher) + } + return &BlocksBackend{ blockStore: blockService.Blockstore(), blockService: blockService, @@ -147,17 +152,40 @@ func NewBlocksBackend(blockService blockservice.BlockService, opts ...BlocksBack }, nil } -func (bb *BlocksBackend) Get(ctx context.Context, path ImmutablePath, ranges ...ByteRange) (ContentPathMetadata, *GetResponse, error) { +func (bb *BlocksBackend) Get(ctx context.Context, path path.ImmutablePath, ranges ...ByteRange) (ContentPathMetadata, *GetResponse, error) { md, nd, err := bb.getNode(ctx, path) if err != nil { return md, nil, err } + // Only a single range is supported in responses to HTTP Range Requests. + // When more than one is passed in the Range header, this library will + // return a response for the first one and ignores remaining ones. 
+ var ra *ByteRange + if len(ranges) > 0 { + ra = &ranges[0] + } + rootCodec := nd.Cid().Prefix().GetCodec() + // This covers both Raw blocks and terminal IPLD codecs like dag-cbor and dag-json // Note: while only cbor, json, dag-cbor, and dag-json are currently supported by gateways this could change + // Note: For the raw codec we return just the relevant range rather than the entire block if rootCodec != uint64(mc.DagPb) { - return md, NewGetResponseFromFile(files.NewBytesFile(nd.RawData())), nil + f := files.NewBytesFile(nd.RawData()) + + fileSize, err := f.Size() + if err != nil { + return ContentPathMetadata{}, nil, err + } + + if rootCodec == uint64(mc.Raw) { + if err := seekToRangeStart(f, ra); err != nil { + return ContentPathMetadata{}, nil, err + } + } + + return md, NewGetResponseFromReader(f, fileSize), nil } // This code path covers full graph, single file/directory, and range requests @@ -179,16 +207,29 @@ func (bb *BlocksBackend) Get(ctx context.Context, path ImmutablePath, ranges ... if sz < 0 { return ContentPathMetadata{}, nil, fmt.Errorf("directory cumulative DAG size cannot be negative") } - return md, NewGetResponseFromDirectoryListing(uint64(sz), dir.EnumLinksAsync(ctx)), nil + return md, NewGetResponseFromDirectoryListing(uint64(sz), dir.EnumLinksAsync(ctx), nil), nil } if file, ok := f.(files.File); ok { - return md, NewGetResponseFromFile(file), nil + fileSize, err := f.Size() + if err != nil { + return ContentPathMetadata{}, nil, err + } + + if err := seekToRangeStart(file, ra); err != nil { + return ContentPathMetadata{}, nil, err + } + + if s, ok := f.(*files.Symlink); ok { + return md, NewGetResponseFromSymlink(s, fileSize), nil + } + + return md, NewGetResponseFromReader(file, fileSize), nil } return ContentPathMetadata{}, nil, fmt.Errorf("data was not a valid file or directory: %w", ErrInternalServerError) // TODO: should there be a gateway invalid content type to abstract over the various IPLD error types? 
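Elsewhere in this constructor, the new `WithResolver` option introduced above means the resolver used by these methods can now be supplied by the caller instead of being built internally. A caller-side sketch follows; the function name is hypothetical, and the fetcher wiring simply reproduces the default setup shown in this file, where a real caller would presumably plug in a custom fetcher or caching layer.

```go
package main

import (
	"github.com/ipfs/boxo/blockservice"
	bsfetcher "github.com/ipfs/boxo/fetcher/impl/blockservice"
	"github.com/ipfs/boxo/gateway"
	"github.com/ipfs/boxo/path/resolver"
	"github.com/ipfs/go-unixfsnode"
	dagpb "github.com/ipld/go-codec-dagpb"
)

// newBackendWithCustomResolver builds the resolver explicitly and hands it to
// NewBlocksBackend via WithResolver instead of letting the backend construct one.
func newBackendWithCustomResolver(bsrv blockservice.BlockService) (*gateway.BlocksBackend, error) {
	fetcherCfg := bsfetcher.NewFetcherConfig(bsrv)
	fetcherCfg.PrototypeChooser = dagpb.AddSupportToChooser(bsfetcher.DefaultPrototypeChooser)
	r := resolver.NewBasicResolver(fetcherCfg.WithReifier(unixfsnode.Reify))

	return gateway.NewBlocksBackend(bsrv, gateway.WithResolver(r))
}
```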
} -func (bb *BlocksBackend) GetAll(ctx context.Context, path ImmutablePath) (ContentPathMetadata, files.Node, error) { +func (bb *BlocksBackend) GetAll(ctx context.Context, path path.ImmutablePath) (ContentPathMetadata, files.Node, error) { md, nd, err := bb.getNode(ctx, path) if err != nil { return md, nil, err @@ -202,7 +243,7 @@ func (bb *BlocksBackend) GetAll(ctx context.Context, path ImmutablePath) (Conten return md, n, nil } -func (bb *BlocksBackend) GetBlock(ctx context.Context, path ImmutablePath) (ContentPathMetadata, files.File, error) { +func (bb *BlocksBackend) GetBlock(ctx context.Context, path path.ImmutablePath) (ContentPathMetadata, files.File, error) { md, nd, err := bb.getNode(ctx, path) if err != nil { return md, nil, err @@ -211,7 +252,7 @@ func (bb *BlocksBackend) GetBlock(ctx context.Context, path ImmutablePath) (Cont return md, files.NewBytesFile(nd.RawData()), nil } -func (bb *BlocksBackend) Head(ctx context.Context, path ImmutablePath) (ContentPathMetadata, files.Node, error) { +func (bb *BlocksBackend) Head(ctx context.Context, path path.ImmutablePath) (ContentPathMetadata, *HeadResponse, error) { md, nd, err := bb.getNode(ctx, path) if err != nil { return md, nil, err @@ -219,7 +260,7 @@ func (bb *BlocksBackend) Head(ctx context.Context, path ImmutablePath) (ContentP rootCodec := nd.Cid().Prefix().GetCodec() if rootCodec != uint64(mc.DagPb) { - return md, files.NewBytesFile(nd.RawData()), nil + return md, NewHeadResponseForFile(files.NewBytesFile(nd.RawData()), int64(len(nd.RawData()))), nil } // TODO: We're not handling non-UnixFS dag-pb. There's a bit of a discrepancy @@ -229,7 +270,24 @@ func (bb *BlocksBackend) Head(ctx context.Context, path ImmutablePath) (ContentP return ContentPathMetadata{}, nil, err } - return md, fileNode, nil + sz, err := fileNode.Size() + if err != nil { + return ContentPathMetadata{}, nil, err + } + + if _, ok := fileNode.(files.Directory); ok { + return md, NewHeadResponseForDirectory(sz), nil + } + + if _, ok := fileNode.(*files.Symlink); ok { + return md, NewHeadResponseForSymlink(sz), nil + } + + if f, ok := fileNode.(files.File); ok { + return md, NewHeadResponseForFile(f, sz), nil + } + + return ContentPathMetadata{}, nil, fmt.Errorf("unsupported UnixFS file type") } // emptyRoot is a CAR root with the empty identity CID. CAR files are recommended @@ -237,9 +295,9 @@ func (bb *BlocksBackend) Head(ctx context.Context, path ImmutablePath) (ContentP // https://ipld.io/specs/transport/car/carv1/#number-of-roots var emptyRoot = []cid.Cid{cid.MustParse("bafkqaaa")} -func (bb *BlocksBackend) GetCAR(ctx context.Context, p ImmutablePath, params CarParams) (ContentPathMetadata, io.ReadCloser, error) { - pathMetadata, err := bb.ResolvePath(ctx, p) - if err != nil { +func (bb *BlocksBackend) GetCAR(ctx context.Context, p path.ImmutablePath, params CarParams) (ContentPathMetadata, io.ReadCloser, error) { + pathMetadata, resolveErr := bb.ResolvePath(ctx, p) + if resolveErr != nil { rootCid, err := cid.Decode(strings.Split(p.String(), "/")[2]) if err != nil { return ContentPathMetadata{}, nil, err @@ -261,21 +319,22 @@ func (bb *BlocksBackend) GetCAR(ctx context.Context, p ImmutablePath, params Car // Setup the UnixFS resolver. 
f := newNodeGetterFetcherSingleUseFactory(ctx, blockGetter) pathResolver := resolver.NewBasicResolver(f) - ip := ipfspath.FromString(p.String()) - _, _, err = pathResolver.ResolveToLastNode(ctx, ip) + _, _, err = pathResolver.ResolveToLastNode(ctx, p) if isErrNotFound(err) { return ContentPathMetadata{ PathSegmentRoots: nil, - LastSegment: ifacepath.NewResolvedPath(ip, rootCid, rootCid, ""), + LastSegment: path.FromCid(rootCid), ContentType: "", }, io.NopCloser(&buf), nil + } else if err != nil { + return ContentPathMetadata{}, nil, err + } else { + return ContentPathMetadata{}, nil, resolveErr } - return ContentPathMetadata{}, nil, err } - contentPathStr := p.String() - if !strings.HasPrefix(contentPathStr, "/ipfs/") { + if p.Namespace() != path.IPFSNamespace { return ContentPathMetadata{}, nil, fmt.Errorf("path does not have /ipfs/ prefix") } @@ -283,7 +342,7 @@ func (bb *BlocksBackend) GetCAR(ctx context.Context, p ImmutablePath, params Car go func() { cw, err := storage.NewWritable( w, - []cid.Cid{pathMetadata.LastSegment.Cid()}, + []cid.Cid{pathMetadata.LastSegment.RootCid()}, car.WriteAsCarV1(true), car.AllowDuplicatePuts(params.Duplicates.Bool()), ) @@ -310,7 +369,7 @@ func (bb *BlocksBackend) GetCAR(ctx context.Context, p ImmutablePath, params Car // TODO: support selectors passed as request param: https://github.com/ipfs/kubo/issues/8769 // TODO: this is very slow if blocks are remote due to linear traversal. Do we need deterministic traversals here? - carWriteErr := walkGatewaySimpleSelector(ctx, ipfspath.Path(contentPathStr), params, &lsys, pathResolver) + carWriteErr := walkGatewaySimpleSelector(ctx, p, params, &lsys, pathResolver) // io.PipeWriter.CloseWithError always returns nil. _ = w.CloseWithError(carWriteErr) @@ -320,7 +379,7 @@ func (bb *BlocksBackend) GetCAR(ctx context.Context, p ImmutablePath, params Car } // walkGatewaySimpleSelector walks the subgraph described by the path and terminal element parameters -func walkGatewaySimpleSelector(ctx context.Context, p ipfspath.Path, params CarParams, lsys *ipld.LinkSystem, pathResolver resolver.Resolver) error { +func walkGatewaySimpleSelector(ctx context.Context, p path.ImmutablePath, params CarParams, lsys *ipld.LinkSystem, pathResolver resolver.Resolver) error { // First resolve the path since we always need to. 
lastCid, remainder, err := pathResolver.ResolveToLastNode(ctx, p) if err != nil { @@ -488,18 +547,19 @@ func walkGatewaySimpleSelector(ctx context.Context, p ipfspath.Path, params CarP } } -func (bb *BlocksBackend) getNode(ctx context.Context, path ImmutablePath) (ContentPathMetadata, format.Node, error) { - roots, lastSeg, err := bb.getPathRoots(ctx, path) +func (bb *BlocksBackend) getNode(ctx context.Context, path path.ImmutablePath) (ContentPathMetadata, format.Node, error) { + roots, lastSeg, remainder, err := bb.getPathRoots(ctx, path) if err != nil { return ContentPathMetadata{}, nil, err } md := ContentPathMetadata{ - PathSegmentRoots: roots, - LastSegment: lastSeg, + PathSegmentRoots: roots, + LastSegment: lastSeg, + LastSegmentRemainder: remainder, } - lastRoot := lastSeg.Cid() + lastRoot := lastSeg.RootCid() nd, err := bb.dagService.Get(ctx, lastRoot) if err != nil { @@ -509,7 +569,7 @@ func (bb *BlocksBackend) getNode(ctx context.Context, path ImmutablePath) (Conte return md, nd, err } -func (bb *BlocksBackend) getPathRoots(ctx context.Context, contentPath ImmutablePath) ([]cid.Cid, ifacepath.Resolved, error) { +func (bb *BlocksBackend) getPathRoots(ctx context.Context, contentPath path.ImmutablePath) ([]cid.Cid, path.ImmutablePath, []string, error) { /* These are logical roots where each CID represent one path segment and resolves to either a directory or the root block of a file. @@ -533,56 +593,55 @@ func (bb *BlocksBackend) getPathRoots(ctx context.Context, contentPath Immutable contentPathStr := contentPath.String() pathSegments := strings.Split(contentPathStr[6:], "/") sp.WriteString(contentPathStr[:5]) // /ipfs or /ipns - var lastPath ifacepath.Resolved + var ( + lastPath path.ImmutablePath + remainder []string + ) for _, root := range pathSegments { if root == "" { continue } sp.WriteString("/") sp.WriteString(root) - resolvedSubPath, err := bb.resolvePath(ctx, ifacepath.New(sp.String())) + p, err := path.NewPath(sp.String()) + if err != nil { + return nil, path.ImmutablePath{}, nil, err + } + resolvedSubPath, remainderSubPath, err := bb.resolvePath(ctx, p) if err != nil { // TODO: should we be more explicit here and is this part of the IPFSBackend contract? 
// The issue here was that we returned datamodel.ErrWrongKind instead of this resolver error if isErrNotFound(err) { - return nil, nil, resolver.ErrNoLink{Name: root, Node: lastPath.Cid()} + return nil, path.ImmutablePath{}, nil, &resolver.ErrNoLink{Name: root, Node: lastPath.RootCid()} } - return nil, nil, err + return nil, path.ImmutablePath{}, nil, err } lastPath = resolvedSubPath - pathRoots = append(pathRoots, lastPath.Cid()) + remainder = remainderSubPath + pathRoots = append(pathRoots, lastPath.RootCid()) } pathRoots = pathRoots[:len(pathRoots)-1] - return pathRoots, lastPath, nil + return pathRoots, lastPath, remainder, nil } -func (bb *BlocksBackend) ResolveMutable(ctx context.Context, p ifacepath.Path) (ImmutablePath, error) { - err := p.IsValid() - if err != nil { - return ImmutablePath{}, err - } - - ipath := ipfspath.Path(p.String()) - switch ipath.Segments()[0] { - case "ipns": - ipath, err = resolve.ResolveIPNS(ctx, bb.namesys, ipath) - if err != nil { - return ImmutablePath{}, err - } - imPath, err := NewImmutablePath(ifacepath.New(ipath.String())) +func (bb *BlocksBackend) ResolveMutable(ctx context.Context, p path.Path) (path.ImmutablePath, time.Duration, time.Time, error) { + switch p.Namespace() { + case path.IPNSNamespace: + res, err := namesys.Resolve(ctx, bb.namesys, p) if err != nil { - return ImmutablePath{}, err + return path.ImmutablePath{}, 0, time.Time{}, err } - return imPath, nil - case "ipfs": - imPath, err := NewImmutablePath(ifacepath.New(ipath.String())) + ip, err := path.NewImmutablePath(res.Path) if err != nil { - return ImmutablePath{}, err + return path.ImmutablePath{}, 0, time.Time{}, err } - return imPath, nil + return ip, res.TTL, res.LastMod, nil + case path.IPFSNamespace: + ip, err := path.NewImmutablePath(p) + return ip, 0, time.Time{}, err default: - return ImmutablePath{}, NewErrorStatusCode(fmt.Errorf("unsupported path namespace: %s", p.Namespace()), http.StatusNotImplemented) + return path.ImmutablePath{}, 0, time.Time{}, NewErrorStatusCode(fmt.Errorf("unsupported path namespace: %s", p.Namespace()), http.StatusNotImplemented) } } @@ -591,88 +650,88 @@ func (bb *BlocksBackend) GetIPNSRecord(ctx context.Context, c cid.Cid) ([]byte, return nil, NewErrorStatusCode(errors.New("IPNS Record responses are not supported by this gateway"), http.StatusNotImplemented) } - // Fails fast if the CID is not an encoded Libp2p Key, avoids wasteful - // round trips to the remote routing provider. - if mc.Code(c.Type()) != mc.Libp2pKey { - return nil, NewErrorStatusCode(errors.New("cid codec must be libp2p-key"), http.StatusBadRequest) - } - - // The value store expects the key itself to be encoded as a multihash. 
- id, err := peer.FromCid(c) + name, err := ipns.NameFromCid(c) if err != nil { - return nil, err + return nil, NewErrorStatusCode(err, http.StatusBadRequest) } - return bb.routing.GetValue(ctx, "/ipns/"+string(id)) + return bb.routing.GetValue(ctx, string(name.RoutingKey())) } -func (bb *BlocksBackend) GetDNSLinkRecord(ctx context.Context, hostname string) (ifacepath.Path, error) { +func (bb *BlocksBackend) GetDNSLinkRecord(ctx context.Context, hostname string) (path.Path, error) { if bb.namesys != nil { - p, err := bb.namesys.Resolve(ctx, "/ipns/"+hostname, nsopts.Depth(1)) + p, err := path.NewPath("/ipns/" + hostname) + if err != nil { + return nil, err + } + res, err := bb.namesys.Resolve(ctx, p, namesys.ResolveWithDepth(1)) if err == namesys.ErrResolveRecursion { err = nil } - return ifacepath.New(p.String()), err + return res.Path, err } return nil, NewErrorStatusCode(errors.New("not implemented"), http.StatusNotImplemented) } -func (bb *BlocksBackend) IsCached(ctx context.Context, p ifacepath.Path) bool { - rp, err := bb.resolvePath(ctx, p) +func (bb *BlocksBackend) IsCached(ctx context.Context, p path.Path) bool { + rp, _, err := bb.resolvePath(ctx, p) if err != nil { return false } - has, _ := bb.blockStore.Has(ctx, rp.Cid()) + has, _ := bb.blockStore.Has(ctx, rp.RootCid()) return has } -func (bb *BlocksBackend) ResolvePath(ctx context.Context, path ImmutablePath) (ContentPathMetadata, error) { - roots, lastSeg, err := bb.getPathRoots(ctx, path) +func (bb *BlocksBackend) ResolvePath(ctx context.Context, path path.ImmutablePath) (ContentPathMetadata, error) { + roots, lastSeg, remainder, err := bb.getPathRoots(ctx, path) if err != nil { return ContentPathMetadata{}, err } md := ContentPathMetadata{ - PathSegmentRoots: roots, - LastSegment: lastSeg, + PathSegmentRoots: roots, + LastSegment: lastSeg, + LastSegmentRemainder: remainder, } return md, nil } -func (bb *BlocksBackend) resolvePath(ctx context.Context, p ifacepath.Path) (ifacepath.Resolved, error) { - if _, ok := p.(ifacepath.Resolved); ok { - return p.(ifacepath.Resolved), nil +func (bb *BlocksBackend) resolvePath(ctx context.Context, p path.Path) (path.ImmutablePath, []string, error) { + var err error + if p.Namespace() == path.IPNSNamespace { + res, err := namesys.Resolve(ctx, bb.namesys, p) + if err != nil { + return path.ImmutablePath{}, nil, err + } + p = res.Path } - err := p.IsValid() - if err != nil { - return nil, err + if p.Namespace() != path.IPFSNamespace { + return path.ImmutablePath{}, nil, fmt.Errorf("unsupported path namespace: %s", p.Namespace()) } - ipath := ipfspath.Path(p.String()) - if ipath.Segments()[0] == "ipns" { - ipath, err = resolve.ResolveIPNS(ctx, bb.namesys, ipath) - if err != nil { - return nil, err - } + imPath, err := path.NewImmutablePath(p) + if err != nil { + return path.ImmutablePath{}, nil, err } - if ipath.Segments()[0] != "ipfs" { - return nil, fmt.Errorf("unsupported path namespace: %s", p.Namespace()) + node, remainder, err := bb.resolver.ResolveToLastNode(ctx, imPath) + if err != nil { + return path.ImmutablePath{}, nil, err } - node, rest, err := bb.resolver.ResolveToLastNode(ctx, ipath) + p, err = path.Join(path.FromCid(node), remainder...) 
if err != nil { - return nil, err + return path.ImmutablePath{}, nil, err } - root, err := cid.Parse(ipath.Segments()[1]) + imPath, err = path.NewImmutablePath(p) if err != nil { - return nil, err + return path.ImmutablePath{}, nil, err } - return ifacepath.NewResolvedPath(ipath, node, root, gopath.Join(rest...)), nil + return imPath, remainder, nil } type nodeGetterToCarExporer struct { diff --git a/gateway/errors.go b/gateway/errors.go index a39bd2a01..4e4bb6823 100644 --- a/gateway/errors.go +++ b/gateway/errors.go @@ -149,6 +149,8 @@ func webError(w http.ResponseWriter, r *http.Request, c *Config, err error, defa code = http.StatusBadRequest case isErrNotFound(err): code = http.StatusNotFound + case isErrContentBlocked(err): + code = http.StatusGone case errors.Is(err, context.DeadlineExceeded): code = http.StatusGatewayTimeout } @@ -163,7 +165,7 @@ func webError(w http.ResponseWriter, r *http.Request, c *Config, err error, defa if acceptsHTML { w.Header().Set("Content-Type", "text/html") w.WriteHeader(code) - _ = assets.ErrorTemplate.Execute(w, assets.ErrorTemplateData{ + err = assets.ErrorTemplate.Execute(w, assets.ErrorTemplateData{ GlobalData: assets.GlobalData{ Menu: c.Menu, }, @@ -171,6 +173,9 @@ func webError(w http.ResponseWriter, r *http.Request, c *Config, err error, defa StatusText: http.StatusText(code), Error: err.Error(), }) + if err != nil { + _, _ = w.Write([]byte(fmt.Sprintf("error during body generation: %v", err))) + } } else { http.Error(w, err.Error(), code) } @@ -179,15 +184,14 @@ func webError(w http.ResponseWriter, r *http.Request, c *Config, err error, defa // isErrNotFound returns true for IPLD errors that should return 4xx errors (e.g. the path doesn't exist, the data is // the wrong type, etc.), rather than issues with just finding and retrieving the data. func isErrNotFound(err error) bool { + if errors.Is(err, &resolver.ErrNoLink{}) { + return true + } + // Checks if err is of a type that does not implement the .Is interface and // cannot be directly compared to. Therefore, errors.Is cannot be used. for { - _, ok := err.(resolver.ErrNoLink) - if ok { - return true - } - - _, ok = err.(datamodel.ErrWrongKind) + _, ok := err.(datamodel.ErrWrongKind) if ok { return true } @@ -203,3 +207,10 @@ func isErrNotFound(err error) bool { } } } + +// isErrContentBlocked returns true for content filtering system errors +func isErrContentBlocked(err error) bool { + // TODO: we match error message to avoid pulling nopfs as a dependency + // Ref. https://github.com/ipfs-shipyard/nopfs/blob/cde3b5ba964c13e977f4a95f3bd8ca7d7710fbda/status.go#L87-L89 + return strings.Contains(err.Error(), "blocked and cannot be provided") +} diff --git a/gateway/gateway.go b/gateway/gateway.go index 19b801bba..28cf5d295 100644 --- a/gateway/gateway.go +++ b/gateway/gateway.go @@ -8,11 +8,12 @@ import ( "sort" "strconv" "strings" + "time" - "github.com/ipfs/boxo/coreiface/path" "github.com/ipfs/boxo/files" "github.com/ipfs/boxo/gateway/assets" "github.com/ipfs/boxo/ipld/unixfs" + "github.com/ipfs/boxo/path" "github.com/ipfs/go-cid" ) @@ -93,38 +94,6 @@ type PublicGateway struct { DeserializedResponses bool } -// ImmutablePath represents a [path.Path] that is not mutable. -// -// TODO: Is this what we want for ImmutablePath? 
-type ImmutablePath struct { - p path.Path -} - -func NewImmutablePath(p path.Path) (ImmutablePath, error) { - if p.Mutable() { - return ImmutablePath{}, fmt.Errorf("path cannot be mutable") - } - return ImmutablePath{p: p}, nil -} - -func (i ImmutablePath) String() string { - return i.p.String() -} - -func (i ImmutablePath) Namespace() string { - return i.p.Namespace() -} - -func (i ImmutablePath) Mutable() bool { - return false -} - -func (i ImmutablePath) IsValid() error { - return i.p.IsValid() -} - -var _ path.Path = (*ImmutablePath)(nil) - type CarParams struct { Range *DagByteRange Scope DagScope @@ -205,7 +174,7 @@ const ( ) // DuplicateBlocksPolicy represents the content type parameter 'dups' (IPIP-412) -type DuplicateBlocksPolicy int +type DuplicateBlocksPolicy uint8 const ( DuplicateBlocksUnspecified DuplicateBlocksPolicy = iota // 0 - implicit default @@ -214,14 +183,16 @@ const ( ) // NewDuplicateBlocksPolicy returns DuplicateBlocksPolicy based on the content type parameter 'dups' (IPIP-412) -func NewDuplicateBlocksPolicy(dupsValue string) DuplicateBlocksPolicy { +func NewDuplicateBlocksPolicy(dupsValue string) (DuplicateBlocksPolicy, error) { switch dupsValue { case "y": - return DuplicateBlocksIncluded + return DuplicateBlocksIncluded, nil case "n": - return DuplicateBlocksExcluded + return DuplicateBlocksExcluded, nil + case "": + return DuplicateBlocksUnspecified, nil } - return DuplicateBlocksUnspecified + return 0, fmt.Errorf("unsupported application/vnd.ipld.car content type dups parameter: %q", dupsValue) } func (d DuplicateBlocksPolicy) Bool() bool { @@ -241,9 +212,10 @@ func (d DuplicateBlocksPolicy) String() string { } type ContentPathMetadata struct { - PathSegmentRoots []cid.Cid - LastSegment path.Resolved - ContentType string // Only used for UnixFS requests + PathSegmentRoots []cid.Cid + LastSegment path.ImmutablePath + LastSegmentRemainder []string + ContentType string // Only used for UnixFS requests } // ByteRange describes a range request within a UnixFS file. 
"From" and "To" mostly @@ -260,21 +232,74 @@ type ByteRange struct { } type GetResponse struct { - bytes files.File + bytes io.ReadCloser + bytesSize int64 + symlink *files.Symlink directoryMetadata *directoryMetadata } +func (r *GetResponse) Close() error { + if r.bytes != nil { + return r.bytes.Close() + } + if r.symlink != nil { + return r.symlink.Close() + } + if r.directoryMetadata != nil { + if r.directoryMetadata.closeFn == nil { + return nil + } + return r.directoryMetadata.closeFn() + } + // Should be unreachable + return nil +} + +var _ io.Closer = (*GetResponse)(nil) + type directoryMetadata struct { dagSize uint64 entries <-chan unixfs.LinkResult + closeFn func() error +} + +func NewGetResponseFromReader(file io.ReadCloser, fullFileSize int64) *GetResponse { + return &GetResponse{bytes: file, bytesSize: fullFileSize} +} + +func NewGetResponseFromSymlink(symlink *files.Symlink, size int64) *GetResponse { + return &GetResponse{symlink: symlink, bytesSize: size} +} + +func NewGetResponseFromDirectoryListing(dagSize uint64, entries <-chan unixfs.LinkResult, closeFn func() error) *GetResponse { + return &GetResponse{directoryMetadata: &directoryMetadata{dagSize: dagSize, entries: entries, closeFn: closeFn}} } -func NewGetResponseFromFile(file files.File) *GetResponse { - return &GetResponse{bytes: file} +type HeadResponse struct { + bytesSize int64 + startingBytes io.ReadCloser + isFile bool + isSymLink bool + isDir bool } -func NewGetResponseFromDirectoryListing(dagSize uint64, entries <-chan unixfs.LinkResult) *GetResponse { - return &GetResponse{directoryMetadata: &directoryMetadata{dagSize, entries}} +func (r *HeadResponse) Close() error { + if r.startingBytes != nil { + return r.startingBytes.Close() + } + return nil +} + +func NewHeadResponseForFile(startingBytes io.ReadCloser, size int64) *HeadResponse { + return &HeadResponse{startingBytes: startingBytes, isFile: true, bytesSize: size} +} + +func NewHeadResponseForSymlink(symlinkSize int64) *HeadResponse { + return &HeadResponse{isSymLink: true, bytesSize: symlinkSize} +} + +func NewHeadResponseForDirectory(dagSize int64) *HeadResponse { + return &HeadResponse{isDir: true, bytesSize: dagSize} } // IPFSBackend is the required set of functionality used to implement the IPFS @@ -305,32 +330,39 @@ type IPFSBackend interface { // file will still need magic bytes from the very beginning for content // type sniffing). // - A range request for a directory currently holds no semantic meaning. + // - For non-UnixFS (and non-raw data) such as terminal IPLD dag-cbor/json, etc. blocks the returned response + // bytes should be the complete block and returned as an [io.ReadSeekCloser] starting at the beginning of the + // block rather than as an [io.ReadCloser] that starts at the beginning of the range request. // // [HTTP Byte Ranges]: https://httpwg.org/specs/rfc9110.html#rfc.section.14.1.2 - Get(context.Context, ImmutablePath, ...ByteRange) (ContentPathMetadata, *GetResponse, error) + Get(context.Context, path.ImmutablePath, ...ByteRange) (ContentPathMetadata, *GetResponse, error) // GetAll returns a UnixFS file or directory depending on what the path is that has been requested. Directories should // include all content recursively. 
- GetAll(context.Context, ImmutablePath) (ContentPathMetadata, files.Node, error) + GetAll(context.Context, path.ImmutablePath) (ContentPathMetadata, files.Node, error) // GetBlock returns a single block of data - GetBlock(context.Context, ImmutablePath) (ContentPathMetadata, files.File, error) + GetBlock(context.Context, path.ImmutablePath) (ContentPathMetadata, files.File, error) - // Head returns a file or directory depending on what the path is that has been requested. - // For UnixFS files should return a file which has the correct file size and either returns the ContentType in ContentPathMetadata or - // enough data (e.g. 3kiB) such that the content type can be determined by sniffing. - // For all other data types returning just size information is sufficient - // TODO: give function more explicit return types - Head(context.Context, ImmutablePath) (ContentPathMetadata, files.Node, error) + // Head returns a [HeadResponse] depending on what the path is that has been requested. + // For UnixFS files (and raw blocks) should return the size of the file and either set the ContentType in + // ContentPathMetadata or send back a reader from the beginning of the file with enough data (e.g. 3kiB) such that + // the content type can be determined by sniffing. + // + // For UnixFS directories and symlinks only setting the size and type are necessary. + // + // For all other data types (e.g. (DAG-)CBOR/JSON blocks) returning the size information as a file while setting + // the content-type is sufficient. + Head(context.Context, path.ImmutablePath) (ContentPathMetadata, *HeadResponse, error) // ResolvePath resolves the path using UnixFS resolver. If the path does not // exist due to a missing link, it should return an error of type: // NewErrorResponse(fmt.Errorf("no link named %q under %s", name, cid), http.StatusNotFound) - ResolvePath(context.Context, ImmutablePath) (ContentPathMetadata, error) + ResolvePath(context.Context, path.ImmutablePath) (ContentPathMetadata, error) // GetCAR returns a CAR file for the given immutable path. It returns an error // if there was an issue before the CAR streaming begins. - GetCAR(context.Context, ImmutablePath, CarParams) (ContentPathMetadata, io.ReadCloser, error) + GetCAR(context.Context, path.ImmutablePath, CarParams) (ContentPathMetadata, io.ReadCloser, error) // IsCached returns whether or not the path exists locally. IsCached(context.Context, path.Path) bool @@ -340,11 +372,11 @@ type IPFSBackend interface { GetIPNSRecord(context.Context, cid.Cid) ([]byte, error) // ResolveMutable takes a mutable path and resolves it into an immutable one. This means recursively resolving any - // DNSLink or IPNS records. + // DNSLink or IPNS records. It should also return a TTL. If the TTL is unknown, 0 should be returned. // // For example, given a mapping from `/ipns/dnslink.tld -> /ipns/ipns-id/mydirectory` and `/ipns/ipns-id` to // `/ipfs/some-cid`, the result of passing `/ipns/dnslink.tld/myfile` would be `/ipfs/some-cid/mydirectory/myfile`. - ResolveMutable(context.Context, path.Path) (ImmutablePath, error) + ResolveMutable(context.Context, path.Path) (path.ImmutablePath, time.Duration, time.Time, error) // GetDNSLinkRecord returns the DNSLink TXT record for the provided FQDN. // Unlike ResolvePath, it does not perform recursive resolution. 
It only diff --git a/gateway/gateway_test.go b/gateway/gateway_test.go index 98996acb3..53f19ca08 100644 --- a/gateway/gateway_test.go +++ b/gateway/gateway_test.go @@ -9,10 +9,9 @@ import ( "testing" "time" - ipath "github.com/ipfs/boxo/coreiface/path" "github.com/ipfs/boxo/files" "github.com/ipfs/boxo/namesys" - path "github.com/ipfs/boxo/path" + "github.com/ipfs/boxo/path" "github.com/ipfs/boxo/path/resolver" "github.com/ipfs/go-cid" ipld "github.com/ipfs/go-ipld-format" @@ -26,14 +25,23 @@ func TestGatewayGet(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - k, err := backend.resolvePathNoRootsReturned(ctx, ipath.Join(ipath.IpfsPath(root), "subdir", "fnord")) + p, err := path.Join(path.FromCid(root), "subdir", "fnord") require.NoError(t, err) - backend.namesys["/ipns/example.com"] = path.FromCid(k.Cid()) - backend.namesys["/ipns/working.example.com"] = path.FromString(k.String()) - backend.namesys["/ipns/double.example.com"] = path.FromString("/ipns/working.example.com") - backend.namesys["/ipns/triple.example.com"] = path.FromString("/ipns/double.example.com") - backend.namesys["/ipns/broken.example.com"] = path.FromString("/ipns/" + k.Cid().String()) + k, err := backend.resolvePathNoRootsReturned(ctx, p) + require.NoError(t, err) + + mustMakeDNSLinkPath := func(domain string) path.Path { + p, err := path.NewPath("/ipns/" + domain) + require.NoError(t, err) + return p + } + + backend.namesys["/ipns/example.com"] = newMockNamesysItem(path.FromCid(k.RootCid()), 0) + backend.namesys["/ipns/working.example.com"] = newMockNamesysItem(k, 0) + backend.namesys["/ipns/double.example.com"] = newMockNamesysItem(mustMakeDNSLinkPath("working.example.com"), 0) + backend.namesys["/ipns/triple.example.com"] = newMockNamesysItem(mustMakeDNSLinkPath("double.example.com"), 0) + backend.namesys["/ipns/broken.example.com"] = newMockNamesysItem(mustMakeDNSLinkPath(k.RootCid().String()), 0) // We picked .man because: // 1. It's a valid TLD. // 2. Go treats it as the file extension for "man" files (even though @@ -41,7 +49,7 @@ func TestGatewayGet(t *testing.T) { // // Unfortunately, this may not work on all platforms as file type // detection is platform dependent. 
- backend.namesys["/ipns/example.man"] = path.FromString(k.String()) + backend.namesys["/ipns/example.man"] = newMockNamesysItem(k, 0) for _, test := range []struct { host string @@ -50,10 +58,10 @@ func TestGatewayGet(t *testing.T) { text string }{ {"127.0.0.1:8080", "/", http.StatusNotFound, "404 page not found\n"}, - {"127.0.0.1:8080", "/ipfs", http.StatusBadRequest, "invalid path \"/ipfs/\": not enough path components\n"}, - {"127.0.0.1:8080", "/ipns", http.StatusBadRequest, "invalid path \"/ipns/\": not enough path components\n"}, - {"127.0.0.1:8080", "/" + k.Cid().String(), http.StatusNotFound, "404 page not found\n"}, - {"127.0.0.1:8080", "/ipfs/this-is-not-a-cid", http.StatusBadRequest, "invalid path \"/ipfs/this-is-not-a-cid\": invalid CID: invalid cid: illegal base32 data at input byte 3\n"}, + {"127.0.0.1:8080", "/ipfs", http.StatusBadRequest, "invalid path \"/ipfs/\": path does not have enough components\n"}, + {"127.0.0.1:8080", "/ipns", http.StatusBadRequest, "invalid path \"/ipns/\": path does not have enough components\n"}, + {"127.0.0.1:8080", "/" + k.RootCid().String(), http.StatusNotFound, "404 page not found\n"}, + {"127.0.0.1:8080", "/ipfs/this-is-not-a-cid", http.StatusBadRequest, "invalid path \"/ipfs/this-is-not-a-cid\": invalid cid: illegal base32 data at input byte 3\n"}, {"127.0.0.1:8080", k.String(), http.StatusOK, "fnord"}, {"127.0.0.1:8080", "/ipns/nxdomain.example.com", http.StatusInternalServerError, "failed to resolve /ipns/nxdomain.example.com: " + namesys.ErrResolveFailed.Error() + "\n"}, {"127.0.0.1:8080", "/ipns/%0D%0A%0D%0Ahello", http.StatusInternalServerError, "failed to resolve /ipns/\\r\\n\\r\\nhello: " + namesys.ErrResolveFailed.Error() + "\n"}, @@ -64,7 +72,7 @@ func TestGatewayGet(t *testing.T) { {"working.example.com", "/", http.StatusOK, "fnord"}, {"double.example.com", "/", http.StatusOK, "fnord"}, {"triple.example.com", "/", http.StatusOK, "fnord"}, - {"working.example.com", k.String(), http.StatusNotFound, "failed to resolve /ipns/working.example.com" + k.String() + ": no link named \"ipfs\" under " + k.Cid().String() + "\n"}, + {"working.example.com", k.String(), http.StatusNotFound, "failed to resolve /ipns/working.example.com" + k.String() + ": no link named \"ipfs\" under " + k.RootCid().String() + "\n"}, {"broken.example.com", "/", http.StatusInternalServerError, "failed to resolve /ipns/broken.example.com/: " + namesys.ErrResolveFailed.Error() + "\n"}, {"broken.example.com", k.String(), http.StatusInternalServerError, "failed to resolve /ipns/broken.example.com" + k.String() + ": " + namesys.ErrResolveFailed.Error() + "\n"}, // This test case ensures we don't treat the TLD as a file extension. 
@@ -85,46 +93,6 @@ func TestGatewayGet(t *testing.T) { } } -func TestPretty404(t *testing.T) { - ts, backend, root := newTestServerAndNode(t, nil, "pretty-404.car") - t.Logf("test server url: %s", ts.URL) - - host := "example.net" - backend.namesys["/ipns/"+host] = path.FromCid(root) - - for _, test := range []struct { - path string - accept string - status int - text string - }{ - {"/ipfs-404.html", "text/html", http.StatusOK, "Custom 404"}, - {"/nope", "text/html", http.StatusNotFound, "Custom 404"}, - {"/nope", "text/*", http.StatusNotFound, "Custom 404"}, - {"/nope", "*/*", http.StatusNotFound, "Custom 404"}, - {"/nope", "application/json", http.StatusNotFound, fmt.Sprintf("failed to resolve /ipns/example.net/nope: no link named \"nope\" under %s\n", root.String())}, - {"/deeper/nope", "text/html", http.StatusNotFound, "Deep custom 404"}, - {"/deeper/", "text/html", http.StatusOK, ""}, - {"/deeper", "text/html", http.StatusOK, ""}, - {"/nope/nope", "text/html", http.StatusNotFound, "Custom 404"}, - } { - testName := fmt.Sprintf("%s %s", test.path, test.accept) - t.Run(testName, func(t *testing.T) { - req := mustNewRequest(t, "GET", ts.URL+test.path, nil) - req.Header.Add("Accept", test.accept) - req.Host = host - resp := mustDo(t, req) - defer resp.Body.Close() - require.Equal(t, test.status, resp.StatusCode) - body, err := io.ReadAll(resp.Body) - require.NoError(t, err) - if test.text != "" { - require.Equal(t, test.text, string(body)) - } - }) - } -} - func TestHeaders(t *testing.T) { t.Parallel() @@ -150,7 +118,41 @@ func TestHeaders(t *testing.T) { dagCborRoots = dirRoots + "," + dagCborCID ) - t.Run("Cache-Control is not immutable on generated /ipfs/ HTML dir listings", func(t *testing.T) { + t.Run("Cache-Control uses TTL for /ipns/ when it is known", func(t *testing.T) { + t.Parallel() + + ts, backend, root := newTestServerAndNode(t, nil, "ipns-hostname-redirects.car") + backend.namesys["/ipns/example.net"] = newMockNamesysItem(path.FromCid(root), time.Second*30) + backend.namesys["/ipns/example.com"] = newMockNamesysItem(path.FromCid(root), time.Second*55) + backend.namesys["/ipns/unknown.com"] = newMockNamesysItem(path.FromCid(root), 0) + + testCases := []struct { + path string + cacheControl string + }{ + {"/ipns/example.net/", "public, max-age=30"}, // As generated directory listing + {"/ipns/example.com/", "public, max-age=55"}, // As generated directory listing (different) + {"/ipns/unknown.com/", ""}, // As generated directory listing (unknown) + {"/ipns/example.net/foo/", "public, max-age=30"}, // As index.html directory listing + {"/ipns/example.net/foo/index.html", "public, max-age=30"}, // As deserialized UnixFS file + {"/ipns/example.net/?format=raw", "public, max-age=30"}, // As Raw block + {"/ipns/example.net/?format=dag-json", "public, max-age=30"}, // As DAG-JSON block + {"/ipns/example.net/?format=dag-cbor", "public, max-age=30"}, // As DAG-CBOR block + {"/ipns/example.net/?format=car", "public, max-age=30"}, // As CAR block + } + + for _, testCase := range testCases { + req := mustNewRequest(t, http.MethodGet, ts.URL+testCase.path, nil) + res := mustDoWithoutRedirect(t, req) + if testCase.cacheControl == "" { + assert.Empty(t, res.Header["Cache-Control"]) + } else { + assert.Equal(t, testCase.cacheControl, res.Header.Get("Cache-Control")) + } + } + }) + + t.Run("Cache-Control is not immutable on generated /ipfs/ HTML dir listings", func(t *testing.T) { req := mustNewRequest(t, http.MethodGet, ts.URL+"/ipfs/"+rootCID+"/", nil) res := mustDoWithoutRedirect(t, req) 
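The test above pins down how a known IPNS TTL surfaces as `Cache-Control: public, max-age=...`. On the backend side that TTL comes from `ResolveMutable`; below is a sketch of a wrapper that reports a fixed TTL on top of an existing backend. The type, field, and 30-second value are hypothetical, and the path remainder is ignored for brevity.

```go
package main

import (
	"context"
	"time"

	"github.com/ipfs/boxo/gateway"
	"github.com/ipfs/boxo/path"
	"github.com/ipfs/go-cid"
)

// fixedTTLBackend wraps an existing IPFSBackend and resolves every mutable path
// to a fixed root CID with a 30s TTL, which the gateway would surface as
// "Cache-Control: public, max-age=30" (as asserted in the test above).
type fixedTTLBackend struct {
	gateway.IPFSBackend
	root cid.Cid
}

func (b *fixedTTLBackend) ResolveMutable(ctx context.Context, p path.Path) (path.ImmutablePath, time.Duration, time.Time, error) {
	// A real implementation would resolve p and re-attach its remaining segments.
	return path.FromCid(b.root), 30 * time.Second, time.Now(), nil
}
```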
@@ -492,7 +494,7 @@ func TestRedirects(t *testing.T) { t.Parallel() ts, backend, root := newTestServerAndNode(t, nil, "ipns-hostname-redirects.car") - backend.namesys["/ipns/example.net"] = path.FromCid(root) + backend.namesys["/ipns/example.net"] = newMockNamesysItem(path.FromCid(root), 0) // make request to directory containing index.html req := mustNewRequest(t, http.MethodGet, ts.URL+"/foo", nil) @@ -527,7 +529,7 @@ func TestRedirects(t *testing.T) { t.Parallel() backend, root := newMockBackend(t, "redirects-spa.car") - backend.namesys["/ipns/example.com"] = path.FromCid(root) + backend.namesys["/ipns/example.com"] = newMockNamesysItem(path.FromCid(root), 0) ts := newTestServerWithConfig(t, backend, Config{ Headers: map[string][]string{}, @@ -664,8 +666,8 @@ func TestDeserializedResponses(t *testing.T) { t.Parallel() backend, root := newMockBackend(t, "fixtures.car") - backend.namesys["/ipns/trustless.com"] = path.FromCid(root) - backend.namesys["/ipns/trusted.com"] = path.FromCid(root) + backend.namesys["/ipns/trustless.com"] = newMockNamesysItem(path.FromCid(root), 0) + backend.namesys["/ipns/trusted.com"] = newMockNamesysItem(path.FromCid(root), 0) ts := newTestServerWithConfig(t, backend, Config{ Headers: map[string][]string{}, @@ -707,43 +709,43 @@ type errorMockBackend struct { err error } -func (mb *errorMockBackend) Get(ctx context.Context, path ImmutablePath, getRange ...ByteRange) (ContentPathMetadata, *GetResponse, error) { +func (mb *errorMockBackend) Get(ctx context.Context, path path.ImmutablePath, getRange ...ByteRange) (ContentPathMetadata, *GetResponse, error) { return ContentPathMetadata{}, nil, mb.err } -func (mb *errorMockBackend) GetAll(ctx context.Context, path ImmutablePath) (ContentPathMetadata, files.Node, error) { +func (mb *errorMockBackend) GetAll(ctx context.Context, path path.ImmutablePath) (ContentPathMetadata, files.Node, error) { return ContentPathMetadata{}, nil, mb.err } -func (mb *errorMockBackend) GetBlock(ctx context.Context, path ImmutablePath) (ContentPathMetadata, files.File, error) { +func (mb *errorMockBackend) GetBlock(ctx context.Context, path path.ImmutablePath) (ContentPathMetadata, files.File, error) { return ContentPathMetadata{}, nil, mb.err } -func (mb *errorMockBackend) Head(ctx context.Context, path ImmutablePath) (ContentPathMetadata, files.Node, error) { +func (mb *errorMockBackend) Head(ctx context.Context, path path.ImmutablePath) (ContentPathMetadata, *HeadResponse, error) { return ContentPathMetadata{}, nil, mb.err } -func (mb *errorMockBackend) GetCAR(ctx context.Context, path ImmutablePath, params CarParams) (ContentPathMetadata, io.ReadCloser, error) { +func (mb *errorMockBackend) GetCAR(ctx context.Context, path path.ImmutablePath, params CarParams) (ContentPathMetadata, io.ReadCloser, error) { return ContentPathMetadata{}, nil, mb.err } -func (mb *errorMockBackend) ResolveMutable(ctx context.Context, path ipath.Path) (ImmutablePath, error) { - return ImmutablePath{}, mb.err +func (mb *errorMockBackend) ResolveMutable(ctx context.Context, p path.Path) (path.ImmutablePath, time.Duration, time.Time, error) { + return path.ImmutablePath{}, 0, time.Time{}, mb.err } func (mb *errorMockBackend) GetIPNSRecord(ctx context.Context, c cid.Cid) ([]byte, error) { return nil, mb.err } -func (mb *errorMockBackend) GetDNSLinkRecord(ctx context.Context, hostname string) (ipath.Path, error) { +func (mb *errorMockBackend) GetDNSLinkRecord(ctx context.Context, hostname string) (path.Path, error) { return nil, mb.err } -func (mb 
*errorMockBackend) IsCached(ctx context.Context, p ipath.Path) bool { +func (mb *errorMockBackend) IsCached(ctx context.Context, p path.Path) bool { return false } -func (mb *errorMockBackend) ResolvePath(ctx context.Context, path ImmutablePath) (ContentPathMetadata, error) { +func (mb *errorMockBackend) ResolvePath(ctx context.Context, path path.ImmutablePath) (ContentPathMetadata, error) { return ContentPathMetadata{}, mb.err } @@ -763,7 +765,7 @@ func TestErrorBubblingFromBackend(t *testing.T) { } testError("500 Not Found from IPLD", &ipld.ErrNotFound{}, http.StatusInternalServerError) - testError("404 Not Found from path resolver", resolver.ErrNoLink{}, http.StatusNotFound) + testError("404 Not Found from path resolver", &resolver.ErrNoLink{}, http.StatusNotFound) testError("502 Bad Gateway", ErrBadGateway, http.StatusBadGateway) testError("504 Gateway Timeout", ErrGatewayTimeout, http.StatusGatewayTimeout) @@ -791,27 +793,27 @@ type panicMockBackend struct { panicOnHostnameHandler bool } -func (mb *panicMockBackend) Get(ctx context.Context, immutablePath ImmutablePath, ranges ...ByteRange) (ContentPathMetadata, *GetResponse, error) { +func (mb *panicMockBackend) Get(ctx context.Context, immutablePath path.ImmutablePath, ranges ...ByteRange) (ContentPathMetadata, *GetResponse, error) { panic("i am panicking") } -func (mb *panicMockBackend) GetAll(ctx context.Context, immutablePath ImmutablePath) (ContentPathMetadata, files.Node, error) { +func (mb *panicMockBackend) GetAll(ctx context.Context, immutablePath path.ImmutablePath) (ContentPathMetadata, files.Node, error) { panic("i am panicking") } -func (mb *panicMockBackend) GetBlock(ctx context.Context, immutablePath ImmutablePath) (ContentPathMetadata, files.File, error) { +func (mb *panicMockBackend) GetBlock(ctx context.Context, immutablePath path.ImmutablePath) (ContentPathMetadata, files.File, error) { panic("i am panicking") } -func (mb *panicMockBackend) Head(ctx context.Context, immutablePath ImmutablePath) (ContentPathMetadata, files.Node, error) { +func (mb *panicMockBackend) Head(ctx context.Context, immutablePath path.ImmutablePath) (ContentPathMetadata, *HeadResponse, error) { panic("i am panicking") } -func (mb *panicMockBackend) GetCAR(ctx context.Context, immutablePath ImmutablePath, params CarParams) (ContentPathMetadata, io.ReadCloser, error) { +func (mb *panicMockBackend) GetCAR(ctx context.Context, immutablePath path.ImmutablePath, params CarParams) (ContentPathMetadata, io.ReadCloser, error) { panic("i am panicking") } -func (mb *panicMockBackend) ResolveMutable(ctx context.Context, p ipath.Path) (ImmutablePath, error) { +func (mb *panicMockBackend) ResolveMutable(ctx context.Context, p path.Path) (path.ImmutablePath, time.Duration, time.Time, error) { panic("i am panicking") } @@ -819,7 +821,7 @@ func (mb *panicMockBackend) GetIPNSRecord(ctx context.Context, c cid.Cid) ([]byt panic("i am panicking") } -func (mb *panicMockBackend) GetDNSLinkRecord(ctx context.Context, hostname string) (ipath.Path, error) { +func (mb *panicMockBackend) GetDNSLinkRecord(ctx context.Context, hostname string) (path.Path, error) { // GetDNSLinkRecord is also called on the WithHostname handler. We have this option // to disable panicking here so we can test if both the regular gateway handler // and the hostname handler can handle panics. 
@@ -830,11 +832,11 @@ func (mb *panicMockBackend) GetDNSLinkRecord(ctx context.Context, hostname strin return nil, errors.New("not implemented") } -func (mb *panicMockBackend) IsCached(ctx context.Context, p ipath.Path) bool { +func (mb *panicMockBackend) IsCached(ctx context.Context, p path.Path) bool { panic("i am panicking") } -func (mb *panicMockBackend) ResolvePath(ctx context.Context, immutablePath ImmutablePath) (ContentPathMetadata, error) { +func (mb *panicMockBackend) ResolvePath(ctx context.Context, immutablePath path.ImmutablePath) (ContentPathMetadata, error) { panic("i am panicking") } diff --git a/gateway/handler.go b/gateway/handler.go index ecf505617..29a816b7a 100644 --- a/gateway/handler.go +++ b/gateway/handler.go @@ -16,9 +16,9 @@ import ( "strings" "time" - ipath "github.com/ipfs/boxo/coreiface/path" "github.com/ipfs/boxo/gateway/assets" "github.com/ipfs/boxo/ipns" + "github.com/ipfs/boxo/path" cid "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log/v2" "github.com/libp2p/go-libp2p/core/peer" @@ -34,7 +34,7 @@ var log = logging.Logger("boxo/gateway") const ( ipfsPathPrefix = "/ipfs/" - ipnsPathPrefix = "/ipns/" + ipnsPathPrefix = ipns.NamespacePrefix immutableCacheControl = "public, max-age=29030400, immutable" ) @@ -92,13 +92,19 @@ func NewHandler(c Config, backend IPFSBackend) http.Handler { return newHandlerWithMetrics(&c, backend) } -// serveContent replies to the request using the content in the provided ReadSeeker +// serveContent replies to the request using the content in the provided Reader // and returns the status code written and any error encountered during a write. -// It wraps http.serveContent which takes care of If-None-Match+Etag, +// It wraps httpServeContent (a close clone of http.ServeContent) which takes care of If-None-Match+Etag, // Content-Length and range requests. -func serveContent(w http.ResponseWriter, req *http.Request, name string, modtime time.Time, content io.ReadSeeker) (int, bool, error) { +// +// Notes: +// 1. For HEAD requests the io.Reader may be nil/undefined +// 2. When the io.Reader is needed it must start at the beginning of the first Range Request component if it exists +// 3. Only a single HTTP Range Request is supported, if more than one are requested only the first will be honored +// 4. The Content-Type header must already be set +func serveContent(w http.ResponseWriter, req *http.Request, modtime time.Time, size int64, content io.Reader) (int, bool, error) { ew := &errRecordingResponseWriter{ResponseWriter: w} - http.ServeContent(ew, req, name, modtime, content) + httpServeContent(ew, req, modtime, size, content) // When we calculate some metrics we want a flag that lets us to ignore // errors and 304 Not Modified, and only care when requested data @@ -182,12 +188,14 @@ type requestData struct { // Defined for all requests. begin time.Time logger *zap.SugaredLogger - contentPath ipath.Path + contentPath path.Path responseFormat string responseParams map[string]string // Defined for non IPNS Record requests. - immutablePath ImmutablePath + immutablePath path.ImmutablePath + ttl time.Duration + lastMod time.Time // Defined if resolution has already happened. pathMetadata *ContentPathMetadata @@ -196,9 +204,9 @@ type requestData struct { // mostlyResolvedPath is an opportunistic optimization that returns the mostly // resolved version of ImmutablePath available. It does not guarantee it is fully // resolved, nor that it is the original. 
-func (rq *requestData) mostlyResolvedPath() ImmutablePath { +func (rq *requestData) mostlyResolvedPath() path.ImmutablePath { if rq.pathMetadata != nil { - imPath, err := NewImmutablePath(rq.pathMetadata.LastSegment) + imPath, err := path.NewImmutablePath(rq.pathMetadata.LastSegment) if err != nil { // This will never happen. This error has previously been checked in // [handleIfNoneMatch] and the request will have returned 500. @@ -217,12 +225,18 @@ func (i *handler) getOrHeadHandler(w http.ResponseWriter, r *http.Request) { if handleProtocolHandlerRedirect(w, r, i.config) || i.handleServiceWorkerRegistration(w, r) || - handleIpnsB58mhToCidRedirection(w, r) { + handleIpnsB58mhToCidRedirection(w, r) || + i.handleSuperfluousNamespace(w, r) { return } var success bool - contentPath := ipath.New(r.URL.Path) + contentPath, err := path.NewPath(r.URL.Path) + if err != nil { + i.webError(w, r, err, http.StatusBadRequest) + return + } + ctx := context.WithValue(r.Context(), ContentPathKey, contentPath) r = r.WithContext(ctx) @@ -232,13 +246,7 @@ func (i *handler) getOrHeadHandler(w http.ResponseWriter, r *http.Request) { } }() - if i.handleOnlyIfCached(w, r, contentPath) || - i.handleSuperfluousNamespace(w, r, contentPath) { - return - } - - if err := contentPath.IsValid(); err != nil { - i.webError(w, r, err, http.StatusBadRequest) + if i.handleOnlyIfCached(w, r, contentPath) { return } @@ -279,14 +287,14 @@ func (i *handler) getOrHeadHandler(w http.ResponseWriter, r *http.Request) { } if contentPath.Mutable() { - rq.immutablePath, err = i.backend.ResolveMutable(r.Context(), contentPath) + rq.immutablePath, rq.ttl, rq.lastMod, err = i.backend.ResolveMutable(r.Context(), contentPath) if err != nil { err = fmt.Errorf("failed to resolve %s: %w", debugStr(contentPath.String()), err) i.webError(w, r, err, http.StatusInternalServerError) return } } else { - rq.immutablePath, err = NewImmutablePath(contentPath) + rq.immutablePath, err = path.NewImmutablePath(contentPath) if err != nil { err = fmt.Errorf("path was expected to be immutable, but was not %s: %w", debugStr(contentPath.String()), err) i.webError(w, r, err, http.StatusInternalServerError) @@ -365,7 +373,7 @@ func (i *handler) isDeserializedResponsePossible(r *http.Request) bool { // in the [Trustless Gateway] spec. // // [Trustless Gateway]: https://specs.ipfs.tech/http-gateways/trustless-gateway/ -func (i *handler) isTrustlessRequest(contentPath ipath.Path, responseFormat string) bool { +func (i *handler) isTrustlessRequest(contentPath path.Path, responseFormat string) bool { // Only allow "/{#1}/{#2}"-like paths. 
trimmedPath := strings.Trim(contentPath.String(), "/") pathComponents := strings.Split(trimmedPath, "/") @@ -373,7 +381,7 @@ func (i *handler) isTrustlessRequest(contentPath ipath.Path, responseFormat stri return false } - if contentPath.Namespace() == "ipns" { + if contentPath.Namespace() == path.IPNSNamespace { // TODO: only ipns records allowed until https://github.com/ipfs/specs/issues/369 is resolved if responseFormat != ipnsRecordResponseFormat { return false @@ -409,7 +417,7 @@ func panicHandler(w http.ResponseWriter) { } } -func addCacheControlHeaders(w http.ResponseWriter, r *http.Request, contentPath ipath.Path, cid cid.Cid, responseFormat string) (modtime time.Time) { +func addCacheControlHeaders(w http.ResponseWriter, r *http.Request, contentPath path.Path, ttl time.Duration, lastMod time.Time, cid cid.Cid, responseFormat string) (modtime time.Time) { // Best effort attempt to set an Etag based on the CID and response format. // Setting an ETag is handled separately for CARs and IPNS records. if etag := getEtag(r, cid, responseFormat); etag != "" { @@ -418,23 +426,24 @@ func addCacheControlHeaders(w http.ResponseWriter, r *http.Request, contentPath // Set Cache-Control and Last-Modified based on contentPath properties if contentPath.Mutable() { - // mutable namespaces such as /ipns/ can't be cached forever - - // For now we set Last-Modified to Now() to leverage caching heuristics built into modern browsers: - // https://github.com/ipfs/kubo/pull/8074#pullrequestreview-645196768 - // but we should not set it to fake values and use Cache-Control based on TTL instead - modtime = time.Now() + if ttl > 0 { + // When we know the TTL, set the Cache-Control header and disable Last-Modified. + w.Header().Set("Cache-Control", fmt.Sprintf("public, max-age=%d", int(ttl.Seconds()))) + } - // TODO: set Cache-Control based on TTL of IPNS/DNSLink: https://github.com/ipfs/kubo/issues/1818#issuecomment-1015849462 - // TODO: set Last-Modified based on /ipns/ publishing timestamp? + if lastMod.IsZero() { + // Otherwise, we set Last-Modified to the current time to leverage caching heuristics + // built into modern browsers: https://github.com/ipfs/kubo/pull/8074#pullrequestreview-645196768 + modtime = time.Now() + } else { + modtime = lastMod + } } else { - // immutable! CACHE ALL THE THINGS, FOREVER! wolololol w.Header().Set("Cache-Control", immutableCacheControl) + modtime = noModtime // disable Last-Modified - // Set modtime to 'zero time' to disable Last-Modified header (superseded by Cache-Control) - modtime = noModtime - - // TODO: set Last-Modified? - TBD - /ipfs/ modification metadata is present in unixfs 1.5 https://github.com/ipfs/kubo/issues/6920? + // TODO: consider setting Last-Modified if UnixFS V1.5 ever gets released + // with metadata: https://github.com/ipfs/kubo/issues/6920 } return modtime @@ -446,7 +455,7 @@ func addCacheControlHeaders(w http.ResponseWriter, r *http.Request, contentPath // - Creation of HTML links that trigger "Save As.." dialog instead of being rendered by the browser // - Overriding the filename used when saving sub-resource assets on HTML page // - providing a default filename for HTTP clients when downloading direct /ipfs/CID without any subpath -func addContentDispositionHeader(w http.ResponseWriter, r *http.Request, contentPath ipath.Path) string { +func addContentDispositionHeader(w http.ResponseWriter, r *http.Request, contentPath path.Path) string { // URL param ?filename=cat.jpg triggers Content-Disposition: [..] 
filename // which impacts default name used in "Save As.." dialog name := getFilename(contentPath) @@ -464,7 +473,7 @@ func addContentDispositionHeader(w http.ResponseWriter, r *http.Request, content return name } -func getFilename(contentPath ipath.Path) string { +func getFilename(contentPath path.Path) string { s := contentPath.String() if (strings.HasPrefix(s, ipfsPathPrefix) || strings.HasPrefix(s, ipnsPathPrefix)) && strings.Count(gopath.Clean(s), "/") <= 2 { // Don't want to treat ipfs.io in /ipns/ipfs.io as a filename. @@ -514,7 +523,7 @@ func setIpfsRootsHeader(w http.ResponseWriter, rq *requestData, md *ContentPathM for _, c := range rq.pathMetadata.PathSegmentRoots { pathRoots = append(pathRoots, c.String()) } - pathRoots = append(pathRoots, rq.pathMetadata.LastSegment.Cid().String()) + pathRoots = append(pathRoots, rq.pathMetadata.LastSegment.RootCid().String()) rootCidList := strings.Join(pathRoots, ",") // convention from rfc2616#sec4.2 w.Header().Set("X-Ipfs-Roots", rootCidList) @@ -554,40 +563,6 @@ func etagMatch(ifNoneMatchHeader string, etagsToCheck ...string) bool { return false } -// scanETag determines if a syntactically valid ETag is present at s. If so, -// the ETag and remaining text after consuming ETag is returned. Otherwise, -// it returns "", "". -// (This is the same logic as one executed inside of http.ServeContent) -func scanETag(s string) (etag string, remain string) { - s = textproto.TrimString(s) - start := 0 - if strings.HasPrefix(s, "W/") { - start = 2 - } - if len(s[start:]) < 2 || s[start] != '"' { - return "", "" - } - // ETag is either W/"text" or "text". - // See RFC 7232 2.3. - for i := start + 1; i < len(s); i++ { - c := s[i] - switch { - // Character values allowed in ETags. - case c == 0x21 || c >= 0x23 && c <= 0x7E || c >= 0x80: - case c == '"': - return s[:i+1], s[i+1:] - default: - return "", "" - } - } - return "", "" -} - -// etagWeakMatch reports whether a and b match using weak ETag comparison. -func etagWeakMatch(a, b string) bool { - return strings.TrimPrefix(a, "W/") == strings.TrimPrefix(b, "W/") -} - // getEtag generates an ETag value based on an HTTP Request, a CID and a response // format. This function DOES NOT generate ETags for CARs or IPNS Records. func getEtag(r *http.Request, cid cid.Cid, responseFormat string) string { @@ -703,7 +678,7 @@ func (i *handler) handleIfNoneMatch(w http.ResponseWriter, r *http.Request, rq * if ifNoneMatch := r.Header.Get("If-None-Match"); ifNoneMatch != "" { pathMetadata, err := i.backend.ResolvePath(r.Context(), rq.immutablePath) if err != nil { - var forwardedPath ImmutablePath + var forwardedPath path.ImmutablePath var continueProcessing bool if isWebRequest(rq.responseFormat) { forwardedPath, continueProcessing = i.handleWebRequestErrors(w, r, rq.mostlyResolvedPath(), rq.immutablePath, rq.contentPath, err, rq.logger) @@ -718,7 +693,7 @@ func (i *handler) handleIfNoneMatch(w http.ResponseWriter, r *http.Request, rq * } } - pathCid := pathMetadata.LastSegment.Cid() + pathCid := pathMetadata.LastSegment.RootCid() // Checks against both file, dir listing, and dag index Etags. // This is an inexpensive check, and it happens before we do any I/O. @@ -733,7 +708,7 @@ func (i *handler) handleIfNoneMatch(w http.ResponseWriter, r *http.Request, rq * } // Check if the resolvedPath is an immutable path. 
- _, err = NewImmutablePath(pathMetadata.LastSegment) + _, err = path.NewImmutablePath(pathMetadata.LastSegment) if err != nil { i.webError(w, r, err, http.StatusInternalServerError) return true @@ -754,7 +729,7 @@ func isWebRequest(responseFormat string) bool { } // handleRequestErrors is used when request type is other than Web+UnixFS -func (i *handler) handleRequestErrors(w http.ResponseWriter, r *http.Request, contentPath ipath.Path, err error) bool { +func (i *handler) handleRequestErrors(w http.ResponseWriter, r *http.Request, contentPath path.Path, err error) bool { if err == nil { return true } @@ -765,7 +740,7 @@ func (i *handler) handleRequestErrors(w http.ResponseWriter, r *http.Request, co // handleWebRequestErrors is used when request type is Web+UnixFS and err could // be a 404 (Not Found) that should be recovered via _redirects file (IPIP-290) -func (i *handler) handleWebRequestErrors(w http.ResponseWriter, r *http.Request, maybeResolvedImPath, immutableContentPath ImmutablePath, contentPath ipath.Path, err error, logger *zap.SugaredLogger) (ImmutablePath, bool) { +func (i *handler) handleWebRequestErrors(w http.ResponseWriter, r *http.Request, maybeResolvedImPath, immutableContentPath path.ImmutablePath, contentPath path.Path, err error, logger *zap.SugaredLogger) (path.ImmutablePath, bool) { if err == nil { return maybeResolvedImPath, true } @@ -773,7 +748,14 @@ func (i *handler) handleWebRequestErrors(w http.ResponseWriter, r *http.Request, if errors.Is(err, ErrServiceUnavailable) { err = fmt.Errorf("failed to resolve %s: %w", debugStr(contentPath.String()), err) i.webError(w, r, err, http.StatusServiceUnavailable) - return ImmutablePath{}, false + return path.ImmutablePath{}, false + } + + // If the error is not an IPLD traversal error then we should not be looking for _redirects or legacy 404s + if !isErrNotFound(err) { + err = fmt.Errorf("failed to resolve %s: %w", debugStr(contentPath.String()), err) + i.webError(w, r, err, http.StatusInternalServerError) + return path.ImmutablePath{}, false } // If we have origin isolation (subdomain gw, DNSLink website), @@ -788,24 +770,14 @@ func (i *handler) handleWebRequestErrors(w http.ResponseWriter, r *http.Request, } } - // if Accept is text/html, see if ipfs-404.html is present - // This logic isn't documented and will likely be removed at some point. - // Any 404 logic in _redirects above will have already run by this time, so it's really an extra fall back - // PLEASE do not use this for new websites, - // follow https://docs.ipfs.tech/how-to/websites-on-ipfs/redirects-and-custom-404s/ instead. - if i.serveLegacy404IfPresent(w, r, immutableContentPath, logger) { - logger.Debugw("served legacy 404") - return ImmutablePath{}, false - } - err = fmt.Errorf("failed to resolve %s: %w", debugStr(contentPath.String()), err) i.webError(w, r, err, http.StatusInternalServerError) - return ImmutablePath{}, false + return path.ImmutablePath{}, false } // Detect 'Cache-Control: only-if-cached' in request and return data if it is already in the local datastore. 
// https://github.com/ipfs/specs/blob/main/http-gateways/PATH_GATEWAY.md#cache-control-request-header -func (i *handler) handleOnlyIfCached(w http.ResponseWriter, r *http.Request, contentPath ipath.Path) bool { +func (i *handler) handleOnlyIfCached(w http.ResponseWriter, r *http.Request, contentPath path.Path) bool { if r.Header.Get("Cache-Control") == "only-if-cached" { if !i.backend.IsCached(r.Context(), contentPath) { if r.Method == http.MethodHead { @@ -915,23 +887,19 @@ func handleIpnsB58mhToCidRedirection(w http.ResponseWriter, r *http.Request) boo // 'intended' path is valid. This is in case gremlins were tickled // wrong way and user ended up at /ipfs/ipfs/{cid} or /ipfs/ipns/{id} // like in bafybeien3m7mdn6imm425vc2s22erzyhbvk5n3ofzgikkhmdkh5cuqbpbq :^)) -func (i *handler) handleSuperfluousNamespace(w http.ResponseWriter, r *http.Request, contentPath ipath.Path) bool { - // If the path is valid, there's nothing to do - if pathErr := contentPath.IsValid(); pathErr == nil { - return false - } - +func (i *handler) handleSuperfluousNamespace(w http.ResponseWriter, r *http.Request) bool { // If there's no superflous namespace, there's nothing to do if !(strings.HasPrefix(r.URL.Path, "/ipfs/ipfs/") || strings.HasPrefix(r.URL.Path, "/ipfs/ipns/")) { return false } // Attempt to fix the superflous namespace - intendedPath := ipath.New(strings.TrimPrefix(r.URL.Path, "/ipfs")) - if err := intendedPath.IsValid(); err != nil { + intendedPath, err := path.NewPath(strings.TrimPrefix(r.URL.Path, "/ipfs")) + if err != nil { i.webError(w, r, fmt.Errorf("invalid ipfs path: %w", err), http.StatusBadRequest) return true } + intendedURL := intendedPath.String() if r.URL.RawQuery != "" { // we render HTML, so ensure query entries are properly escaped @@ -944,19 +912,20 @@ func (i *handler) handleSuperfluousNamespace(w http.ResponseWriter, r *http.Requ // - redirects to intendedURL after a short delay w.WriteHeader(http.StatusBadRequest) - if err := redirectTemplate.Execute(w, redirectTemplateData{ + err = redirectTemplate.Execute(w, redirectTemplateData{ RedirectURL: intendedURL, SuggestedPath: intendedPath.String(), ErrorMsg: fmt.Sprintf("invalid path: %q should be %q", r.URL.Path, intendedPath.String()), - }); err != nil { - i.webError(w, r, fmt.Errorf("failed to redirect when fixing superfluous namespace: %w", err), http.StatusBadRequest) + }) + if err != nil { + _, _ = w.Write([]byte(fmt.Sprintf("error during body generation: %v", err))) } return true } // getTemplateGlobalData returns the global data necessary by most templates. -func (i *handler) getTemplateGlobalData(r *http.Request, contentPath ipath.Path) assets.GlobalData { +func (i *handler) getTemplateGlobalData(r *http.Request, contentPath path.Path) assets.GlobalData { // gatewayURL is used to link to other root CIDs. THis will be blank unless // subdomain or DNSLink resolution is being used for this request. 
var gatewayURL string diff --git a/gateway/handler_block.go b/gateway/handler_block.go index dbff9a7ad..e6bf2267f 100644 --- a/gateway/handler_block.go +++ b/gateway/handler_block.go @@ -22,7 +22,7 @@ func (i *handler) serveRawBlock(ctx context.Context, w http.ResponseWriter, r *h setIpfsRootsHeader(w, rq, &pathMetadata) - blockCid := pathMetadata.LastSegment.Cid() + blockCid := pathMetadata.LastSegment.RootCid() // Set Content-Disposition var name string @@ -34,13 +34,23 @@ func (i *handler) serveRawBlock(ctx context.Context, w http.ResponseWriter, r *h setContentDispositionHeader(w, name, "attachment") // Set remaining headers - modtime := addCacheControlHeaders(w, r, rq.contentPath, blockCid, rawResponseFormat) + modtime := addCacheControlHeaders(w, r, rq.contentPath, rq.ttl, rq.lastMod, blockCid, rawResponseFormat) w.Header().Set("Content-Type", rawResponseFormat) w.Header().Set("X-Content-Type-Options", "nosniff") // no funny business in the browsers :^) + sz, err := data.Size() + if err != nil { + i.handleRequestErrors(w, r, rq.contentPath, err) + return false + } + + if !i.seekToStartOfFirstRange(w, r, data) { + return false + } + // ServeContent will take care of // If-None-Match+Etag, Content-Length and range requests - _, dataSent, _ := serveContent(w, r, name, modtime, data) + _, dataSent, _ := serveContent(w, r, modtime, sz, data) if dataSent { // Update metrics diff --git a/gateway/handler_car.go b/gateway/handler_car.go index 000e0dc9c..21b90108c 100644 --- a/gateway/handler_car.go +++ b/gateway/handler_car.go @@ -2,6 +2,7 @@ package gateway import ( "context" + "encoding/binary" "fmt" "io" "net/http" @@ -10,6 +11,7 @@ import ( "time" "github.com/cespare/xxhash/v2" + "github.com/ipfs/boxo/path" "github.com/ipfs/go-cid" "go.opentelemetry.io/otel/attribute" @@ -56,7 +58,7 @@ func (i *handler) serveCAR(ctx context.Context, w http.ResponseWriter, r *http.R setContentDispositionHeader(w, name, "attachment") // Set Cache-Control (same logic as for a regular files) - addCacheControlHeaders(w, r, rq.contentPath, rootCid, carResponseFormat) + addCacheControlHeaders(w, r, rq.contentPath, rq.ttl, rq.lastMod, rootCid, carResponseFormat) // Generate the CAR Etag. 
etag := getCarEtag(rq.immutablePath, params, rootCid) @@ -165,20 +167,18 @@ func buildCarParams(r *http.Request, contentTypeParams map[string]string) (CarPa } // optional dups from IPIP-412 - if dups := NewDuplicateBlocksPolicy(contentTypeParams["dups"]); dups != DuplicateBlocksUnspecified { - switch dups { - case DuplicateBlocksExcluded, DuplicateBlocksIncluded: - params.Duplicates = dups - default: - return CarParams{}, fmt.Errorf("unsupported application/vnd.ipld.car content type dups parameter: %q", dups) - } - } else { + dups, err := NewDuplicateBlocksPolicy(contentTypeParams["dups"]) + if err != nil { + return CarParams{}, err + } + if dups == DuplicateBlocksUnspecified { // when duplicate block preference is not specified, we set it to // false, as this has always been the default behavior, we should // not break legacy clients, and responses to requests made via ?format=car // should benefit from block deduplication - params.Duplicates = DuplicateBlocksExcluded + dups = DuplicateBlocksExcluded } + params.Duplicates = dups return params, nil } @@ -203,7 +203,7 @@ func buildContentTypeFromCarParams(params CarParams) string { return h.String() } -func getCarRootCidAndLastSegment(imPath ImmutablePath) (cid.Cid, string, error) { +func getCarRootCidAndLastSegment(imPath path.ImmutablePath) (cid.Cid, string, error) { imPathStr := imPath.String() if !strings.HasPrefix(imPathStr, "/ipfs/") { return cid.Undef, "", fmt.Errorf("path does not have /ipfs/ prefix") @@ -224,32 +224,44 @@ func getCarRootCidAndLastSegment(imPath ImmutablePath) (cid.Cid, string, error) return rootCid, lastSegment, err } -func getCarEtag(imPath ImmutablePath, params CarParams, rootCid cid.Cid) string { - data := imPath.String() +func getCarEtag(imPath path.ImmutablePath, params CarParams, rootCid cid.Cid) string { + h := xxhash.New() + h.WriteString(imPath.String()) + // be careful with hashes here, we need boundaries and per entry salt, we don't want a request that has: + // - scope = dfs + // and: + // - order = dfs + // to result in the same hash because if we just do hash(scope + order) they would both yield hash("dfs"). if params.Scope != DagScopeAll { - data += string(params.Scope) + h.WriteString("\x00scope=") + h.WriteString(string(params.Scope)) } // 'order' from IPIP-412 impact Etag only if set to something else // than DFS (which is the implicit default) if params.Order != DagOrderDFS { - data += string(params.Order) + h.WriteString("\x00order=") + h.WriteString(string(params.Order)) } // 'dups' from IPIP-412 impact Etag only if 'y' - if dups := params.Duplicates.String(); dups == "y" { - data += dups + if dups := params.Duplicates; dups == DuplicateBlocksIncluded { + h.WriteString("\x00dups=y") } if params.Range != nil { if params.Range.From != 0 || params.Range.To != nil { - data += strconv.FormatInt(params.Range.From, 10) + h.WriteString("\x00range=") + var b [8]byte + binary.LittleEndian.PutUint64(b[:], uint64(params.Range.From)) + h.Write(b[:]) if params.Range.To != nil { - data += strconv.FormatInt(*params.Range.To, 10) + binary.LittleEndian.PutUint64(b[:], uint64(*params.Range.To)) + h.Write(b[:]) } } } - suffix := strconv.FormatUint(xxhash.Sum64([]byte(data)), 32) + suffix := strconv.FormatUint(h.Sum64(), 32) return `W/"` + rootCid.String() + ".car." 
+ suffix + `"` } diff --git a/gateway/handler_car_test.go b/gateway/handler_car_test.go index 65777453d..da2d16255 100644 --- a/gateway/handler_car_test.go +++ b/gateway/handler_car_test.go @@ -4,7 +4,7 @@ import ( "net/http" "testing" - "github.com/ipfs/boxo/coreiface/path" + "github.com/ipfs/boxo/path" "github.com/ipfs/go-cid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -110,6 +110,30 @@ func TestCarParams(t *testing.T) { require.Equal(t, test.expectedDuplicates.String(), params.Duplicates.String()) } }) + + t.Run("buildCarParams from Accept header: order and dups parsing (invalid)", func(t *testing.T) { + t.Parallel() + + // below ensure the implicit default (DFS and no duplicates) is correctly inferred + // from the value read from Accept header + tests := []string{ + "application/vnd.ipld.car; dups=invalid", + "application/vnd.ipld.car; order=invalid", + "application/vnd.ipld.car; order=dfs; dups=invalid", + "application/vnd.ipld.car; order=invalid; dups=y", + } + for _, test := range tests { + r := mustNewRequest(t, http.MethodGet, "http://example.com/", nil) + r.Header.Set("Accept", test) + + mediaType, formatParams, err := customResponseFormat(r) + assert.NoError(t, err) + assert.Equal(t, carResponseFormat, mediaType) + + _, err = buildCarParams(r, formatParams) + assert.ErrorContains(t, err, "unsupported application/vnd.ipld.car content type") + } + }) } func TestContentTypeFromCarParams(t *testing.T) { @@ -143,7 +167,7 @@ func TestGetCarEtag(t *testing.T) { cid, err := cid.Parse("bafkreifjjcie6lypi6ny7amxnfftagclbuxndqonfipmb64f2km2devei4") require.NoError(t, err) - imPath, err := NewImmutablePath(path.IpfsPath(cid)) + imPath, err := path.NewImmutablePath(path.FromCid(cid)) require.NoError(t, err) t.Run("Etag with entity-bytes=0:* is the same as without query param", func(t *testing.T) { diff --git a/gateway/handler_codec.go b/gateway/handler_codec.go index 007a52fda..89bff966e 100644 --- a/gateway/handler_codec.go +++ b/gateway/handler_codec.go @@ -10,8 +10,8 @@ import ( "strings" "time" - ipath "github.com/ipfs/boxo/coreiface/path" "github.com/ipfs/boxo/gateway/assets" + "github.com/ipfs/boxo/path" "github.com/ipfs/go-cid" "github.com/ipld/go-ipld-prime/multicodec" "github.com/ipld/go-ipld-prime/node/basicnode" @@ -69,24 +69,31 @@ func (i *handler) serveCodec(ctx context.Context, w http.ResponseWriter, r *http defer data.Close() setIpfsRootsHeader(w, rq, &pathMetadata) - return i.renderCodec(ctx, w, r, rq, data) + + blockSize, err := data.Size() + if !i.handleRequestErrors(w, r, rq.contentPath, err) { + return false + } + + return i.renderCodec(ctx, w, r, rq, blockSize, data) } -func (i *handler) renderCodec(ctx context.Context, w http.ResponseWriter, r *http.Request, rq *requestData, blockData io.ReadSeekCloser) bool { +func (i *handler) renderCodec(ctx context.Context, w http.ResponseWriter, r *http.Request, rq *requestData, blockSize int64, blockData io.ReadSeekCloser) bool { resolvedPath := rq.pathMetadata.LastSegment ctx, span := spanTrace(ctx, "Handler.RenderCodec", trace.WithAttributes(attribute.String("path", resolvedPath.String()), attribute.String("requestedContentType", rq.responseFormat))) defer span.End() - blockCid := resolvedPath.Cid() + blockCid := resolvedPath.RootCid() cidCodec := mc.Code(blockCid.Prefix().Codec) responseContentType := rq.responseFormat // If the resolved path still has some remainder, return error for now. 
// TODO: handle this when we have IPLD Patch (https://ipld.io/specs/patch/) via HTTP PUT // TODO: (depends on https://github.com/ipfs/kubo/issues/4801 and https://github.com/ipfs/kubo/issues/4782) - if resolvedPath.Remainder() != "" { - path := strings.TrimSuffix(resolvedPath.String(), resolvedPath.Remainder()) - err := fmt.Errorf("%q of %q could not be returned: reading IPLD Kinds other than Links (CBOR Tag 42) is not implemented: try reading %q instead", resolvedPath.Remainder(), resolvedPath.String(), path) + if len(rq.pathMetadata.LastSegmentRemainder) != 0 { + remainderStr := path.SegmentsToString(rq.pathMetadata.LastSegmentRemainder...) + path := strings.TrimSuffix(resolvedPath.String(), remainderStr) + err := fmt.Errorf("%q of %q could not be returned: reading IPLD Kinds other than Links (CBOR Tag 42) is not implemented: try reading %q instead", remainderStr, resolvedPath.String(), path) i.webError(w, r, err, http.StatusNotImplemented) return false } @@ -104,8 +111,8 @@ func (i *handler) renderCodec(ctx context.Context, w http.ResponseWriter, r *htt } // Set HTTP headers (for caching, etc). Etag will be replaced if handled by serveCodecHTML. - modtime := addCacheControlHeaders(w, r, rq.contentPath, resolvedPath.Cid(), responseContentType) - name := setCodecContentDisposition(w, r, resolvedPath, responseContentType) + modtime := addCacheControlHeaders(w, r, rq.contentPath, rq.ttl, rq.lastMod, resolvedPath.RootCid(), responseContentType) + _ = setCodecContentDisposition(w, r, resolvedPath, responseContentType) w.Header().Set("Content-Type", responseContentType) w.Header().Set("X-Content-Type-Options", "nosniff") @@ -121,7 +128,7 @@ func (i *handler) renderCodec(ctx context.Context, w http.ResponseWriter, r *htt } else { // This covers CIDs with codec 'json' and 'cbor' as those do not have // an explicit requested content type. - return i.serveCodecRaw(ctx, w, r, blockData, rq.contentPath, name, modtime, rq.begin) + return i.serveCodecRaw(ctx, w, r, blockSize, blockData, rq.contentPath, modtime, rq.begin) } } @@ -131,7 +138,7 @@ func (i *handler) renderCodec(ctx context.Context, w http.ResponseWriter, r *htt if ok { for _, skipCodec := range skipCodecs { if skipCodec == cidCodec { - return i.serveCodecRaw(ctx, w, r, blockData, rq.contentPath, name, modtime, rq.begin) + return i.serveCodecRaw(ctx, w, r, blockSize, blockData, rq.contentPath, modtime, rq.begin) } } } @@ -149,7 +156,7 @@ func (i *handler) renderCodec(ctx context.Context, w http.ResponseWriter, r *htt return i.serveCodecConverted(ctx, w, r, blockCid, blockData, rq.contentPath, toCodec, modtime, rq.begin) } -func (i *handler) serveCodecHTML(ctx context.Context, w http.ResponseWriter, r *http.Request, blockCid cid.Cid, blockData io.ReadSeekCloser, resolvedPath ipath.Resolved, contentPath ipath.Path) bool { +func (i *handler) serveCodecHTML(ctx context.Context, w http.ResponseWriter, r *http.Request, blockCid cid.Cid, blockData io.Reader, resolvedPath path.ImmutablePath, contentPath path.Path) bool { // WithHostname may have constructed an IPFS (or IPNS) path using the Host header. // In this case, we need the original path for constructing the redirect. 
requestURI, err := url.ParseRequestURI(r.RequestURI) @@ -179,7 +186,7 @@ func (i *handler) serveCodecHTML(ctx context.Context, w http.ResponseWriter, r * w.Header().Del("Content-Disposition") // Generated index requires custom Etag (output may change between Kubo versions) - dagEtag := getDagIndexEtag(resolvedPath.Cid()) + dagEtag := getDagIndexEtag(resolvedPath.RootCid()) w.Header().Set("Etag", dagEtag) // Remove Cache-Control for now to match UnixFS dir-index-html responses @@ -187,27 +194,26 @@ func (i *handler) serveCodecHTML(ctx context.Context, w http.ResponseWriter, r * // TODO: if we ever change behavior for UnixFS dir listings, same changes should be applied here w.Header().Del("Cache-Control") - cidCodec := mc.Code(resolvedPath.Cid().Prefix().Codec) - if err := assets.DagTemplate.Execute(w, assets.DagTemplateData{ + cidCodec := mc.Code(resolvedPath.RootCid().Prefix().Codec) + err = assets.DagTemplate.Execute(w, assets.DagTemplateData{ GlobalData: i.getTemplateGlobalData(r, contentPath), Path: contentPath.String(), - CID: resolvedPath.Cid().String(), + CID: resolvedPath.RootCid().String(), CodecName: cidCodec.String(), CodecHex: fmt.Sprintf("0x%x", uint64(cidCodec)), Node: parseNode(blockCid, blockData), - }); err != nil { - err = fmt.Errorf("failed to generate HTML listing for this DAG: try fetching raw block with ?format=raw: %w", err) - i.webError(w, r, err, http.StatusInternalServerError) - return false + }) + if err != nil { + _, _ = w.Write([]byte(fmt.Sprintf("error during body generation: %v", err))) } - return true + return err == nil } // parseNode does a best effort attempt to parse this request's block such that // a preview can be displayed in the gateway. If something fails along the way, // returns nil, therefore not displaying the preview. 
-func parseNode(blockCid cid.Cid, blockData io.ReadSeekCloser) *assets.ParsedNode { +func parseNode(blockCid cid.Cid, blockData io.Reader) *assets.ParsedNode { codec := blockCid.Prefix().Codec decoder, err := multicodec.LookupDecoder(codec) if err != nil { @@ -229,10 +235,14 @@ func parseNode(blockCid cid.Cid, blockData io.ReadSeekCloser) *assets.ParsedNode } // serveCodecRaw returns the raw block without any conversion -func (i *handler) serveCodecRaw(ctx context.Context, w http.ResponseWriter, r *http.Request, blockData io.ReadSeekCloser, contentPath ipath.Path, name string, modtime, begin time.Time) bool { +func (i *handler) serveCodecRaw(ctx context.Context, w http.ResponseWriter, r *http.Request, blockSize int64, blockData io.ReadSeekCloser, contentPath path.Path, modtime, begin time.Time) bool { // ServeContent will take care of - // If-None-Match+Etag, Content-Length and range requests - _, dataSent, _ := serveContent(w, r, name, modtime, blockData) + // If-None-Match+Etag, Content-Length and setting range request headers after we've already seeked to the start of + // the first range + if !i.seekToStartOfFirstRange(w, r, blockData) { + return false + } + _, dataSent, _ := serveContent(w, r, modtime, blockSize, blockData) if dataSent { // Update metrics @@ -243,7 +253,7 @@ func (i *handler) serveCodecRaw(ctx context.Context, w http.ResponseWriter, r *h } // serveCodecConverted returns payload converted to codec specified in toCodec -func (i *handler) serveCodecConverted(ctx context.Context, w http.ResponseWriter, r *http.Request, blockCid cid.Cid, blockData io.ReadSeekCloser, contentPath ipath.Path, toCodec mc.Code, modtime, begin time.Time) bool { +func (i *handler) serveCodecConverted(ctx context.Context, w http.ResponseWriter, r *http.Request, blockCid cid.Cid, blockData io.ReadCloser, contentPath path.Path, toCodec mc.Code, modtime, begin time.Time) bool { codec := blockCid.Prefix().Codec decoder, err := multicodec.LookupDecoder(codec) if err != nil { @@ -288,7 +298,7 @@ func (i *handler) serveCodecConverted(ctx context.Context, w http.ResponseWriter return false } -func setCodecContentDisposition(w http.ResponseWriter, r *http.Request, resolvedPath ipath.Resolved, contentType string) string { +func setCodecContentDisposition(w http.ResponseWriter, r *http.Request, resolvedPath path.ImmutablePath, contentType string) string { var dispType, name string ext, ok := contentTypeToExtension[contentType] @@ -300,7 +310,7 @@ func setCodecContentDisposition(w http.ResponseWriter, r *http.Request, resolved if urlFilename := r.URL.Query().Get("filename"); urlFilename != "" { name = urlFilename } else { - name = resolvedPath.Cid().String() + ext + name = resolvedPath.RootCid().String() + ext } // JSON should be inlined, but ?download=true should still override diff --git a/gateway/handler_codec_test.go b/gateway/handler_codec_test.go index c79b07689..d22579027 100644 --- a/gateway/handler_codec_test.go +++ b/gateway/handler_codec_test.go @@ -7,7 +7,7 @@ import ( "net/http" "testing" - ipath "github.com/ipfs/boxo/coreiface/path" + "github.com/ipfs/boxo/path" "github.com/stretchr/testify/require" ) @@ -31,10 +31,13 @@ func TestDagJsonCborPreview(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - resolvedPath, err := backend.resolvePathNoRootsReturned(ctx, ipath.Join(ipath.IpfsPath(root), "subdir", "dag-cbor-document")) + p, err := path.Join(path.FromCid(root), "subdir", "dag-cbor-document") require.NoError(t, err) - cidStr := resolvedPath.Cid().String() + 
resolvedPath, err := backend.resolvePathNoRootsReturned(ctx, p) + require.NoError(t, err) + + cidStr := resolvedPath.RootCid().String() t.Run("path gateway normalizes to trailing slash", func(t *testing.T) { t.Parallel() diff --git a/gateway/handler_defaults.go b/gateway/handler_defaults.go index de31c1fc1..648d51328 100644 --- a/gateway/handler_defaults.go +++ b/gateway/handler_defaults.go @@ -4,12 +4,12 @@ import ( "context" "errors" "fmt" + "io" "net/http" "net/textproto" "strconv" "strings" - "github.com/ipfs/boxo/files" mc "github.com/multiformats/go-multicodec" "go.opentelemetry.io/otel/attribute" @@ -21,25 +21,23 @@ func (i *handler) serveDefaults(ctx context.Context, w http.ResponseWriter, r *h defer span.End() var ( - pathMetadata ContentPathMetadata - bytesResponse files.File - isDirectoryHeadRequest bool - directoryMetadata *directoryMetadata - err error - ranges []ByteRange + pathMetadata ContentPathMetadata + err error + ranges []ByteRange + headResp *HeadResponse + getResp *GetResponse ) switch r.Method { case http.MethodHead: - var data files.Node - pathMetadata, data, err = i.backend.Head(ctx, rq.mostlyResolvedPath()) + pathMetadata, headResp, err = i.backend.Head(ctx, rq.mostlyResolvedPath()) if err != nil { if isWebRequest(rq.responseFormat) { forwardedPath, continueProcessing := i.handleWebRequestErrors(w, r, rq.mostlyResolvedPath(), rq.immutablePath, rq.contentPath, err, rq.logger) if !continueProcessing { return false } - pathMetadata, data, err = i.backend.Head(ctx, forwardedPath) + pathMetadata, headResp, err = i.backend.Head(ctx, forwardedPath) if err != nil { err = fmt.Errorf("failed to resolve %s: %w", debugStr(rq.contentPath.String()), err) i.webError(w, r, err, http.StatusInternalServerError) @@ -51,30 +49,21 @@ func (i *handler) serveDefaults(ctx context.Context, w http.ResponseWriter, r *h } } } - defer data.Close() - if _, ok := data.(files.Directory); ok { - isDirectoryHeadRequest = true - } else if f, ok := data.(files.File); ok { - bytesResponse = f - } else { - i.webError(w, r, fmt.Errorf("unsupported response type"), http.StatusInternalServerError) - return false - } + defer headResp.Close() case http.MethodGet: rangeHeader := r.Header.Get("Range") if rangeHeader != "" { // TODO: Add tests for range parsing - ranges, err = parseRange(rangeHeader) + ranges, err = parseRangeWithoutLength(rangeHeader) if err != nil { i.webError(w, r, fmt.Errorf("invalid range request: %w", err), http.StatusBadRequest) return false } } - var getResp *GetResponse // TODO: passing only resolved path here, instead of contentPath is // harming content routing. Knowing original immutableContentPath will - // allow backend to find providers for parents, even when internal + // allow backend to find providers for parents, even when internal // CIDs are not announced, and will provide better key for caching // related DAGs. pathMetadata, getResp, err = i.backend.Get(ctx, rq.mostlyResolvedPath(), ranges...) 
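The test and handler changes above follow the refactored `boxo/path` API: paths are built with `path.FromCid`/`path.Join`, wrapped with `path.NewImmutablePath`, and the root CID is read with `RootCid()` instead of the old `Cid()`. A small sketch under those assumptions (`rootCidOf` is a hypothetical helper, not part of the diff):

```go
package pathsketch

import (
	"fmt"

	"github.com/ipfs/boxo/path"
	"github.com/ipfs/go-cid"
)

// rootCidOf builds the same kind of path as the codec test above and shows the
// explicit error handling that path.Join and path.NewImmutablePath now require.
func rootCidOf(root cid.Cid) (cid.Cid, error) {
	p, err := path.Join(path.FromCid(root), "subdir", "dag-cbor-document")
	if err != nil {
		return cid.Undef, err
	}

	imPath, err := path.NewImmutablePath(p)
	if err != nil {
		return cid.Undef, err
	}

	fmt.Println(imPath.String()) // e.g. /ipfs/<root>/subdir/dag-cbor-document
	return imPath.RootCid(), nil // RootCid() replaces the old Cid() accessor
}
```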
@@ -96,13 +85,7 @@ func (i *handler) serveDefaults(ctx context.Context, w http.ResponseWriter, r *h } } } - if getResp.bytes != nil { - bytesResponse = getResp.bytes - defer bytesResponse.Close() - } else { - directoryMetadata = getResp.directoryMetadata - } - + defer getResp.Close() default: // This shouldn't be possible to reach which is why it is a 500 rather than 4XX error i.webError(w, r, fmt.Errorf("invalid method: cannot use this HTTP method with the given request"), http.StatusInternalServerError) @@ -112,29 +95,60 @@ func (i *handler) serveDefaults(ctx context.Context, w http.ResponseWriter, r *h setIpfsRootsHeader(w, rq, &pathMetadata) resolvedPath := pathMetadata.LastSegment - switch mc.Code(resolvedPath.Cid().Prefix().Codec) { + switch mc.Code(resolvedPath.RootCid().Prefix().Codec) { case mc.Json, mc.DagJson, mc.Cbor, mc.DagCbor: - if bytesResponse == nil { // This should never happen - i.webError(w, r, fmt.Errorf("decoding error: data not usable as a file"), http.StatusInternalServerError) - return false - } rq.logger.Debugw("serving codec", "path", rq.contentPath) - return i.renderCodec(r.Context(), w, r, rq, bytesResponse) + var blockSize int64 + var dataToRender io.ReadSeekCloser + if headResp != nil { + blockSize = headResp.bytesSize + dataToRender = nil + } else { + blockSize = getResp.bytesSize + dataAsReadSeekCloser, ok := getResp.bytes.(io.ReadSeekCloser) + if !ok { + i.webError(w, r, fmt.Errorf("expected returned non-UnixFS data to be seekable"), http.StatusInternalServerError) + } + dataToRender = dataAsReadSeekCloser + } + + return i.renderCodec(r.Context(), w, r, rq, blockSize, dataToRender) default: rq.logger.Debugw("serving unixfs", "path", rq.contentPath) ctx, span := spanTrace(ctx, "Handler.ServeUnixFS", trace.WithAttributes(attribute.String("path", resolvedPath.String()))) defer span.End() - // Handling Unixfs file - if bytesResponse != nil { - rq.logger.Debugw("serving unixfs file", "path", rq.contentPath) - return i.serveFile(ctx, w, r, resolvedPath, rq.contentPath, bytesResponse, pathMetadata.ContentType, rq.begin) - } - - // Handling Unixfs directory - if directoryMetadata != nil || isDirectoryHeadRequest { - rq.logger.Debugw("serving unixfs directory", "path", rq.contentPath) - return i.serveDirectory(ctx, w, r, resolvedPath, rq.contentPath, isDirectoryHeadRequest, directoryMetadata, ranges, rq.begin, rq.logger) + // Handle UnixFS HEAD requests + if headResp != nil { + if headResp.isFile { + rq.logger.Debugw("serving unixfs file", "path", rq.contentPath) + return i.serveFile(ctx, w, r, resolvedPath, rq, headResp.bytesSize, headResp.startingBytes, false, true, pathMetadata.ContentType) + } else if headResp.isSymLink { + rq.logger.Debugw("serving unixfs file", "path", rq.contentPath) + return i.serveFile(ctx, w, r, resolvedPath, rq, headResp.bytesSize, nil, true, true, pathMetadata.ContentType) + } else if headResp.isDir { + rq.logger.Debugw("serving unixfs directory", "path", rq.contentPath) + return i.serveDirectory(ctx, w, r, resolvedPath, rq, true, nil, ranges) + } + } else { + if getResp.bytes != nil { + rq.logger.Debugw("serving unixfs file", "path", rq.contentPath) + rangeRequestStartsAtZero := true + if len(ranges) > 0 { + ra := ranges[0] + if ra.From != 0 { + rangeRequestStartsAtZero = false + } + } + return i.serveFile(ctx, w, r, resolvedPath, rq, getResp.bytesSize, getResp.bytes, false, rangeRequestStartsAtZero, pathMetadata.ContentType) + } else if getResp.symlink != nil { + rq.logger.Debugw("serving unixfs file", "path", rq.contentPath) + // 
Note: this ignores range requests against symlinks + return i.serveFile(ctx, w, r, resolvedPath, rq, getResp.bytesSize, getResp.symlink, true, true, pathMetadata.ContentType) + } else if getResp.directoryMetadata != nil { + rq.logger.Debugw("serving unixfs directory", "path", rq.contentPath) + return i.serveDirectory(ctx, w, r, resolvedPath, rq, false, getResp.directoryMetadata, ranges) + } } i.webError(w, r, fmt.Errorf("unsupported UnixFS type"), http.StatusInternalServerError) @@ -142,8 +156,8 @@ func (i *handler) serveDefaults(ctx context.Context, w http.ResponseWriter, r *h } } -// parseRange parses a Range header string as per RFC 7233. -func parseRange(s string) ([]ByteRange, error) { +// parseRangeWithoutLength parses a Range header string as per RFC 7233. +func parseRangeWithoutLength(s string) ([]ByteRange, error) { if s == "" { return nil, nil // header not present } diff --git a/gateway/handler_ipns_record.go b/gateway/handler_ipns_record.go index b077fa59a..93d1621f2 100644 --- a/gateway/handler_ipns_record.go +++ b/gateway/handler_ipns_record.go @@ -11,6 +11,7 @@ import ( "github.com/cespare/xxhash/v2" "github.com/ipfs/boxo/ipns" + "github.com/ipfs/boxo/path" "github.com/ipfs/go-cid" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -20,7 +21,7 @@ func (i *handler) serveIpnsRecord(ctx context.Context, w http.ResponseWriter, r ctx, span := spanTrace(ctx, "Handler.ServeIPNSRecord", trace.WithAttributes(attribute.String("path", rq.contentPath.String()))) defer span.End() - if rq.contentPath.Namespace() != "ipns" { + if rq.contentPath.Namespace() != path.IPNSNamespace { err := fmt.Errorf("%s is not an IPNS link", rq.contentPath.String()) i.webError(w, r, err, http.StatusBadRequest) return false diff --git a/gateway/handler_tar.go b/gateway/handler_tar.go index 784e51993..0b2634804 100644 --- a/gateway/handler_tar.go +++ b/gateway/handler_tar.go @@ -28,10 +28,10 @@ func (i *handler) serveTAR(ctx context.Context, w http.ResponseWriter, r *http.R defer file.Close() setIpfsRootsHeader(w, rq, &pathMetadata) - rootCid := pathMetadata.LastSegment.Cid() + rootCid := pathMetadata.LastSegment.RootCid() // Set Cache-Control and read optional Last-Modified time - modtime := addCacheControlHeaders(w, r, rq.contentPath, rootCid, tarResponseFormat) + modtime := addCacheControlHeaders(w, r, rq.contentPath, rq.ttl, rq.lastMod, rootCid, tarResponseFormat) // Set Content-Disposition var name string diff --git a/gateway/handler_unixfs__redirects.go b/gateway/handler_unixfs__redirects.go index a96b87d36..a6fe24826 100644 --- a/gateway/handler_unixfs__redirects.go +++ b/gateway/handler_unixfs__redirects.go @@ -7,13 +7,11 @@ import ( gopath "path" "strconv" "strings" + "time" - "go.uber.org/zap" - - ipath "github.com/ipfs/boxo/coreiface/path" - "github.com/ipfs/boxo/files" - + "github.com/ipfs/boxo/path" redirects "github.com/ipfs/go-ipfs-redirects-file" + "go.uber.org/zap" ) // Resolving a UnixFS path involves determining if the provided `path.Path` exists and returning the `path.Resolved` @@ -38,23 +36,35 @@ import ( // // Note that for security reasons, redirect rules are only processed when the request has origin isolation. // See https://github.com/ipfs/specs/pull/290 for more information. 
-func (i *handler) serveRedirectsIfPresent(w http.ResponseWriter, r *http.Request, maybeResolvedImPath, immutableContentPath ImmutablePath, contentPath ipath.Path, logger *zap.SugaredLogger) (newContentPath ImmutablePath, continueProcessing bool, hadMatchingRule bool) { +func (i *handler) serveRedirectsIfPresent(w http.ResponseWriter, r *http.Request, maybeResolvedImPath, immutableContentPath path.ImmutablePath, contentPath path.Path, logger *zap.SugaredLogger) (newContentPath path.ImmutablePath, continueProcessing bool, hadMatchingRule bool) { // contentPath is the full ipfs path to the requested resource, // regardless of whether path or subdomain resolution is used. - rootPath := getRootPath(immutableContentPath) - redirectsPath := ipath.Join(rootPath, "_redirects") - imRedirectsPath, err := NewImmutablePath(redirectsPath) + rootPath, err := getRootPath(immutableContentPath) + if err != nil { + err = fmt.Errorf("trouble processing _redirects path %q: %w", immutableContentPath.String(), err) + i.webError(w, r, err, http.StatusInternalServerError) + return path.ImmutablePath{}, false, true + } + + redirectsPath, err := path.Join(rootPath, "_redirects") + if err != nil { + err = fmt.Errorf("trouble processing _redirects path %q: %w", rootPath.String(), err) + i.webError(w, r, err, http.StatusInternalServerError) + return path.ImmutablePath{}, false, true + } + + imRedirectsPath, err := path.NewImmutablePath(redirectsPath) if err != nil { err = fmt.Errorf("trouble processing _redirects path %q: %w", redirectsPath, err) i.webError(w, r, err, http.StatusInternalServerError) - return ImmutablePath{}, false, true + return path.ImmutablePath{}, false, true } foundRedirect, redirectRules, err := i.getRedirectRules(r, imRedirectsPath) if err != nil { err = fmt.Errorf("trouble processing _redirects file at %q: %w", redirectsPath, err) i.webError(w, r, err, http.StatusInternalServerError) - return ImmutablePath{}, false, true + return path.ImmutablePath{}, false, true } if foundRedirect { @@ -62,22 +72,27 @@ func (i *handler) serveRedirectsIfPresent(w http.ResponseWriter, r *http.Request if err != nil { err = fmt.Errorf("trouble processing _redirects file at %q: %w", redirectsPath, err) i.webError(w, r, err, http.StatusInternalServerError) - return ImmutablePath{}, false, true + return path.ImmutablePath{}, false, true } if redirected { - return ImmutablePath{}, false, true + return path.ImmutablePath{}, false, true } // 200 is treated as a rewrite, so update the path and continue if newPath != "" { // Reassign contentPath and resolvedPath since the URL was rewritten - p := ipath.New(newPath) - imPath, err := NewImmutablePath(p) + p, err := path.NewPath(newPath) + if err != nil { + err = fmt.Errorf("could not use _redirects file to %q: %w", p, err) + i.webError(w, r, err, http.StatusInternalServerError) + return path.ImmutablePath{}, false, true + } + imPath, err := path.NewImmutablePath(p) if err != nil { err = fmt.Errorf("could not use _redirects file to %q: %w", p, err) i.webError(w, r, err, http.StatusInternalServerError) - return ImmutablePath{}, false, true + return path.ImmutablePath{}, false, true } return imPath, true, true } @@ -87,7 +102,7 @@ func (i *handler) serveRedirectsIfPresent(w http.ResponseWriter, r *http.Request return maybeResolvedImPath, true, false } -func (i *handler) handleRedirectsFileRules(w http.ResponseWriter, r *http.Request, immutableContentPath ImmutablePath, cPath ipath.Path, redirectRules []redirects.Rule, logger *zap.SugaredLogger) (redirected bool, newContentPath 
string, err error) { +func (i *handler) handleRedirectsFileRules(w http.ResponseWriter, r *http.Request, immutableContentPath path.ImmutablePath, cPath path.Path, redirectRules []redirects.Rule, logger *zap.SugaredLogger) (redirected bool, newContentPath string, err error) { // Attempt to match a rule to the URL path, and perform the corresponding redirect or rewrite pathParts := strings.Split(immutableContentPath.String(), "/") if len(pathParts) > 3 { @@ -115,7 +130,12 @@ func (i *handler) handleRedirectsFileRules(w http.ResponseWriter, r *http.Reques // Or 4xx if rule.Status == 404 || rule.Status == 410 || rule.Status == 451 { toPath := rootPath + rule.To - imContent4xxPath, err := NewImmutablePath(ipath.New(toPath)) + p, err := path.NewPath(toPath) + if err != nil { + return true, toPath, err + } + + imContent4xxPath, err := path.NewImmutablePath(p) if err != nil { return true, toPath, err } @@ -129,7 +149,11 @@ func (i *handler) handleRedirectsFileRules(w http.ResponseWriter, r *http.Reques } // All paths should start with /ip(f|n)s//, so get the path after that contentRootPath := strings.Join(contentPathParts[:3], "/") - content4xxPath := ipath.New(contentRootPath + rule.To) + content4xxPath, err := path.NewPath(contentRootPath + rule.To) + if err != nil { + return true, toPath, err + } + err = i.serve4xx(w, r, imContent4xxPath, content4xxPath, rule.Status, logger) return true, toPath, err } @@ -149,7 +173,7 @@ func (i *handler) handleRedirectsFileRules(w http.ResponseWriter, r *http.Reques // getRedirectRules fetches the _redirects file corresponding to a given path and returns the rules // Returns whether _redirects was found, the rules (if they exist) and if there was an error (other than a missing _redirects) // If there is an error returns (false, nil, err) -func (i *handler) getRedirectRules(r *http.Request, redirectsPath ImmutablePath) (bool, []redirects.Rule, error) { +func (i *handler) getRedirectRules(r *http.Request, redirectsPath path.ImmutablePath) (bool, []redirects.Rule, error) { // Check for _redirects file. // Any path resolution failures are ignored and we just assume there's no _redirects file. // Note that ignoring these errors also ensures that the use of the empty CID (bafkqaaa) in tests doesn't fail. 
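The `_redirects` handling above now builds its lookup path with the same `boxo/path` primitives, and every construction step can fail and is checked. A condensed sketch of that sequence (`redirectsFilePath` is a hypothetical helper mirroring `getRootPath` plus the `path.Join`/`path.NewImmutablePath` calls):

```go
package redirectsketch

import (
	gopath "path"
	"strings"

	"github.com/ipfs/boxo/path"
)

// redirectsFilePath derives the immutable /ip(f|n)s/<root>/_redirects path for
// a given content path, the way serveRedirectsIfPresent does above.
func redirectsFilePath(contentPath path.ImmutablePath) (path.ImmutablePath, error) {
	// Content paths always look like /ipfs/<root>/... or /ipns/<root>/...,
	// so parts[2] is the root segment.
	parts := strings.Split(contentPath.String(), "/")

	rootPath, err := path.NewPath(gopath.Join("/", contentPath.Namespace(), parts[2]))
	if err != nil {
		return path.ImmutablePath{}, err
	}

	redirectsPath, err := path.Join(rootPath, "_redirects")
	if err != nil {
		return path.ImmutablePath{}, err
	}

	return path.NewImmutablePath(redirectsPath)
}
```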
@@ -160,12 +184,12 @@ func (i *handler) getRedirectRules(r *http.Request, redirectsPath ImmutablePath) } return false, nil, err } + defer redirectsFileGetResp.Close() if redirectsFileGetResp.bytes == nil { return false, nil, fmt.Errorf(" _redirects is not a file") } f := redirectsFileGetResp.bytes - defer f.Close() // Parse redirect rules from file redirectRules, err := redirects.Parse(f) @@ -176,34 +200,31 @@ func (i *handler) getRedirectRules(r *http.Request, redirectsPath ImmutablePath) } // Returns the root CID Path for the given path -func getRootPath(path ipath.Path) ipath.Path { - parts := strings.Split(path.String(), "/") - return ipath.New(gopath.Join("/", path.Namespace(), parts[2])) +func getRootPath(p path.Path) (path.Path, error) { + parts := strings.Split(p.String(), "/") + return path.NewPath(gopath.Join("/", p.Namespace(), parts[2])) } -func (i *handler) serve4xx(w http.ResponseWriter, r *http.Request, content4xxPathImPath ImmutablePath, content4xxPath ipath.Path, status int, logger *zap.SugaredLogger) error { +func (i *handler) serve4xx(w http.ResponseWriter, r *http.Request, content4xxPathImPath path.ImmutablePath, content4xxPath path.Path, status int, logger *zap.SugaredLogger) error { pathMetadata, getresp, err := i.backend.Get(r.Context(), content4xxPathImPath) if err != nil { return err } + defer getresp.Close() if getresp.bytes == nil { return fmt.Errorf("could not convert node for %d page to file", status) } content4xxFile := getresp.bytes - defer content4xxFile.Close() - content4xxCid := pathMetadata.LastSegment.Cid() + content4xxCid := pathMetadata.LastSegment.RootCid() - size, err := content4xxFile.Size() - if err != nil { - return fmt.Errorf("could not get size of %d page", status) - } + size := getresp.bytesSize logger.Debugf("using _redirects: custom %d file at %q", status, content4xxPath) w.Header().Set("Content-Type", "text/html") w.Header().Set("Content-Length", strconv.FormatInt(size, 10)) - addCacheControlHeaders(w, r, content4xxPath, content4xxCid, "") + addCacheControlHeaders(w, r, content4xxPath, 0, time.Time{}, content4xxCid, "") w.WriteHeader(status) _, err = io.CopyN(w, content4xxFile, size) return err @@ -219,76 +240,3 @@ func hasOriginIsolation(r *http.Request) bool { return false } - -// Deprecated: legacy ipfs-404.html files are superseded by _redirects file -// This is provided only for backward-compatibility, until websites migrate -// to 404s managed via _redirects file (https://github.com/ipfs/specs/pull/290) -func (i *handler) serveLegacy404IfPresent(w http.ResponseWriter, r *http.Request, imPath ImmutablePath, logger *zap.SugaredLogger) bool { - resolved404File, ctype, err := i.searchUpTreeFor404(r, imPath) - if err != nil { - return false - } - defer resolved404File.Close() - - size, err := resolved404File.Size() - if err != nil { - return false - } - - logger.Debugw("using pretty 404 file", "path", imPath) - w.Header().Set("Content-Type", ctype) - w.Header().Set("Content-Length", strconv.FormatInt(size, 10)) - w.WriteHeader(http.StatusNotFound) - _, err = io.CopyN(w, resolved404File, size) - return err == nil -} - -func (i *handler) searchUpTreeFor404(r *http.Request, imPath ImmutablePath) (files.File, string, error) { - filename404, ctype, err := preferred404Filename(r.Header.Values("Accept")) - if err != nil { - return nil, "", err - } - - pathComponents := strings.Split(imPath.String(), "/") - - for idx := len(pathComponents); idx >= 3; idx-- { - pretty404 := gopath.Join(append(pathComponents[0:idx], filename404)...) 
- parsed404Path := ipath.New("/" + pretty404) - if parsed404Path.IsValid() != nil { - break - } - imparsed404Path, err := NewImmutablePath(parsed404Path) - if err != nil { - break - } - - _, getResp, err := i.backend.Get(r.Context(), imparsed404Path) - if err != nil { - continue - } - if getResp.bytes == nil { - return nil, "", fmt.Errorf("found a pretty 404 but it was not a file") - } - return getResp.bytes, ctype, nil - } - - return nil, "", fmt.Errorf("no pretty 404 in any parent folder") -} - -func preferred404Filename(acceptHeaders []string) (string, string, error) { - // If we ever want to offer a 404 file for a different content type - // then this function will need to parse q weightings, but for now - // the presence of anything matching HTML is enough. - for _, acceptHeader := range acceptHeaders { - accepted := strings.Split(acceptHeader, ",") - for _, spec := range accepted { - contentType := strings.SplitN(spec, ";", 1)[0] - switch contentType { - case "*/*", "text/*", "text/html": - return "ipfs-404.html", "text/html", nil - } - } - } - - return "", "", fmt.Errorf("there is no 404 file for the requested content types") -} diff --git a/gateway/handler_unixfs_dir.go b/gateway/handler_unixfs_dir.go index 2808cfdc4..098a77b6a 100644 --- a/gateway/handler_unixfs_dir.go +++ b/gateway/handler_unixfs_dir.go @@ -3,6 +3,7 @@ package gateway import ( "context" "fmt" + "io" "net/http" "net/url" gopath "path" @@ -10,20 +11,18 @@ import ( "time" "github.com/dustin/go-humanize" - ipath "github.com/ipfs/boxo/coreiface/path" "github.com/ipfs/boxo/files" "github.com/ipfs/boxo/gateway/assets" - path "github.com/ipfs/boxo/path" + "github.com/ipfs/boxo/path" cid "github.com/ipfs/go-cid" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" ) // serveDirectory returns the best representation of UnixFS directory // // It will return index.html if present, or generate directory listing otherwise. 
-func (i *handler) serveDirectory(ctx context.Context, w http.ResponseWriter, r *http.Request, resolvedPath ipath.Resolved, contentPath ipath.Path, isHeadRequest bool, directoryMetadata *directoryMetadata, ranges []ByteRange, begin time.Time, logger *zap.SugaredLogger) bool { +func (i *handler) serveDirectory(ctx context.Context, w http.ResponseWriter, r *http.Request, resolvedPath path.ImmutablePath, rq *requestData, isHeadRequest bool, directoryMetadata *directoryMetadata, ranges []ByteRange) bool { ctx, span := spanTrace(ctx, "Handler.ServeDirectory", trace.WithAttributes(attribute.String("path", resolvedPath.String()))) defer span.End() @@ -52,57 +51,80 @@ func (i *handler) serveDirectory(ctx context.Context, w http.ResponseWriter, r * } // /ipfs/cid/foo?bar must be redirected to /ipfs/cid/foo/?bar redirectURL := originalURLPath + suffix - logger.Debugw("directory location moved permanently", "status", http.StatusMovedPermanently) + rq.logger.Debugw("directory location moved permanently", "status", http.StatusMovedPermanently) http.Redirect(w, r, redirectURL, http.StatusMovedPermanently) return true } } // Check if directory has index.html, if so, serveFile - idxPath := ipath.Join(contentPath, "index.html") - imIndexPath, err := NewImmutablePath(ipath.Join(resolvedPath, "index.html")) + idxPath, err := path.Join(rq.contentPath, "index.html") + if err != nil { + i.webError(w, r, err, http.StatusInternalServerError) + return false + } + + indexPath, err := path.Join(resolvedPath, "index.html") + if err != nil { + i.webError(w, r, err, http.StatusInternalServerError) + return false + } + + imIndexPath, err := path.NewImmutablePath(indexPath) if err != nil { i.webError(w, r, err, http.StatusInternalServerError) return false } // TODO: could/should this all be skipped to have HEAD requests just return html content type and save the complexity? If so can we skip the above code as well? - var idxFile files.File + var idxFileBytes io.ReadCloser + var idxFileSize int64 + var returnRangeStartsAtZero bool if isHeadRequest { - var idx files.Node - _, idx, err = i.backend.Head(ctx, imIndexPath) + var idxHeadResp *HeadResponse + _, idxHeadResp, err = i.backend.Head(ctx, imIndexPath) if err == nil { - f, ok := idx.(files.File) - if !ok { + defer idxHeadResp.Close() + if !idxHeadResp.isFile { i.webError(w, r, fmt.Errorf("%q could not be read: %w", imIndexPath, files.ErrNotReader), http.StatusUnprocessableEntity) return false } - idxFile = f + returnRangeStartsAtZero = true + idxFileBytes = idxHeadResp.startingBytes + idxFileSize = idxHeadResp.bytesSize } } else { - var getResp *GetResponse - _, getResp, err = i.backend.Get(ctx, imIndexPath, ranges...) + var idxGetResp *GetResponse + _, idxGetResp, err = i.backend.Get(ctx, imIndexPath, ranges...) 
if err == nil { - if getResp.bytes == nil { + defer idxGetResp.Close() + if idxGetResp.bytes == nil { i.webError(w, r, fmt.Errorf("%q could not be read: %w", imIndexPath, files.ErrNotReader), http.StatusUnprocessableEntity) return false } - idxFile = getResp.bytes + if len(ranges) > 0 { + ra := ranges[0] + returnRangeStartsAtZero = ra.From == 0 + } + idxFileBytes = idxGetResp.bytes + idxFileSize = idxGetResp.bytesSize } } if err == nil { - logger.Debugw("serving index.html file", "path", idxPath) + rq.logger.Debugw("serving index.html file", "path", idxPath) + originalContentPath := rq.contentPath + rq.contentPath = idxPath // write to request - success := i.serveFile(ctx, w, r, resolvedPath, idxPath, idxFile, "text/html", begin) + success := i.serveFile(ctx, w, r, resolvedPath, rq, idxFileSize, idxFileBytes, false, returnRangeStartsAtZero, "text/html") if success { - i.unixfsDirIndexGetMetric.WithLabelValues(contentPath.Namespace()).Observe(time.Since(begin).Seconds()) + i.unixfsDirIndexGetMetric.WithLabelValues(originalContentPath.Namespace()).Observe(time.Since(rq.begin).Seconds()) } return success } if isErrNotFound(err) { - logger.Debugw("no index.html; noop", "path", idxPath) + rq.logger.Debugw("no index.html; noop", "path", idxPath) } else if err != nil { i.webError(w, r, err, http.StatusInternalServerError) return false @@ -113,11 +135,16 @@ func (i *handler) serveDirectory(ctx context.Context, w http.ResponseWriter, r * w.Header().Set("Content-Type", "text/html") // Generated dir index requires custom Etag (output may change between go-libipfs versions) - dirEtag := getDirListingEtag(resolvedPath.Cid()) + dirEtag := getDirListingEtag(resolvedPath.RootCid()) w.Header().Set("Etag", dirEtag) + // Add TTL if known. + if rq.ttl > 0 { + w.Header().Set("Cache-Control", fmt.Sprintf("public, max-age=%d", int(rq.ttl.Seconds()))) + } + if r.Method == http.MethodHead { - logger.Debug("return as request's HTTP method is HEAD") + rq.logger.Debug("return as request's HTTP method is HEAD") return true } @@ -148,7 +175,7 @@ func (i *handler) serveDirectory(ctx context.Context, w http.ResponseWriter, r * backLink := originalURLPath // don't go further up than /ipfs/$hash/ - pathSplit := path.SplitList(contentPath.String()) + pathSplit := strings.Split(rq.contentPath.String(), "/") switch { // skip backlink when listing a content root case len(pathSplit) == 3: // url: /ipfs/$hash @@ -168,29 +195,29 @@ func (i *handler) serveDirectory(ctx context.Context, w http.ResponseWriter, r * } size := humanize.Bytes(directoryMetadata.dagSize) - hash := resolvedPath.Cid().String() - globalData := i.getTemplateGlobalData(r, contentPath) + hash := resolvedPath.RootCid().String() + globalData := i.getTemplateGlobalData(r, rq.contentPath) // See comment above where originalUrlPath is declared. 
tplData := assets.DirectoryTemplateData{ GlobalData: globalData, Listing: dirListing, Size: size, - Path: contentPath.String(), - Breadcrumbs: assets.Breadcrumbs(contentPath.String(), globalData.DNSLink), + Path: rq.contentPath.String(), + Breadcrumbs: assets.Breadcrumbs(rq.contentPath.String(), globalData.DNSLink), BackLink: backLink, Hash: hash, } - logger.Debugw("request processed", "tplDataDNSLink", globalData.DNSLink, "tplDataSize", size, "tplDataBackLink", backLink, "tplDataHash", hash) + rq.logger.Debugw("request processed", "tplDataDNSLink", globalData.DNSLink, "tplDataSize", size, "tplDataBackLink", backLink, "tplDataHash", hash) if err := assets.DirectoryTemplate.Execute(w, tplData); err != nil { - i.webError(w, r, err, http.StatusInternalServerError) + _, _ = w.Write([]byte(fmt.Sprintf("error during body generation: %v", err))) return false } // Update metrics - i.unixfsGenDirListingGetMetric.WithLabelValues(contentPath.Namespace()).Observe(time.Since(begin).Seconds()) + i.unixfsGenDirListingGetMetric.WithLabelValues(rq.contentPath.Namespace()).Observe(time.Since(rq.begin).Seconds()) return true } diff --git a/gateway/handler_unixfs_dir_test.go b/gateway/handler_unixfs_dir_test.go index a8ce04778..e44708687 100644 --- a/gateway/handler_unixfs_dir_test.go +++ b/gateway/handler_unixfs_dir_test.go @@ -6,8 +6,7 @@ import ( "net/http" "testing" - ipath "github.com/ipfs/boxo/coreiface/path" - path "github.com/ipfs/boxo/path" + "github.com/ipfs/boxo/path" "github.com/stretchr/testify/require" ) @@ -19,13 +18,17 @@ func TestIPNSHostnameBacklinks(t *testing.T) { defer cancel() // create /ipns/example.net/foo/ - k2, err := backend.resolvePathNoRootsReturned(ctx, ipath.Join(ipath.IpfsPath(root), "foo? #<'")) + p2, err := path.Join(path.FromCid(root), "foo? #<'") + require.NoError(t, err) + k2, err := backend.resolvePathNoRootsReturned(ctx, p2) require.NoError(t, err) - k3, err := backend.resolvePathNoRootsReturned(ctx, ipath.Join(ipath.IpfsPath(root), "foo? #<'/bar")) + p3, err := path.Join(path.FromCid(root), "foo? #<'/bar") + require.NoError(t, err) + k3, err := backend.resolvePathNoRootsReturned(ctx, p3) require.NoError(t, err) - backend.namesys["/ipns/example.net"] = path.FromCid(root) + backend.namesys["/ipns/example.net"] = newMockNamesysItem(path.FromCid(root), 0) // make request to directory listing req := mustNewRequest(t, http.MethodGet, ts.URL+"/foo%3F%20%23%3C%27/", nil) @@ -44,7 +47,7 @@ func TestIPNSHostnameBacklinks(t *testing.T) { require.Contains(t, s, "", "expected backlink in directory listing") require.Contains(t, s, "", "expected file in directory listing") - require.Contains(t, s, s, k2.Cid().String(), "expected hash in directory listing") + require.Contains(t, s, s, k2.RootCid().String(), "expected hash in directory listing") // make request to directory listing at root req = mustNewRequest(t, http.MethodGet, ts.URL, nil) @@ -83,5 +86,5 @@ func TestIPNSHostnameBacklinks(t *testing.T) { require.True(t, matchPathOrBreadcrumbs(s, "/ipns/example.net/foo? 
#<'/bar"), "expected a path in directory listing") require.Contains(t, s, "", "expected backlink in directory listing") require.Contains(t, s, "", "expected file in directory listing") - require.Contains(t, s, k3.Cid().String(), "expected hash in directory listing") + require.Contains(t, s, k3.RootCid().String(), "expected hash in directory listing") } diff --git a/gateway/handler_unixfs_file.go b/gateway/handler_unixfs_file.go index cd924e5aa..734e11025 100644 --- a/gateway/handler_unixfs_file.go +++ b/gateway/handler_unixfs_file.go @@ -1,6 +1,7 @@ package gateway import ( + "bytes" "context" "fmt" "io" @@ -11,32 +12,24 @@ import ( "time" "github.com/gabriel-vasile/mimetype" - ipath "github.com/ipfs/boxo/coreiface/path" - "github.com/ipfs/boxo/files" + "github.com/ipfs/boxo/path" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" ) // serveFile returns data behind a file along with HTTP headers based on // the file itself, its CID and the contentPath used for accessing it. -func (i *handler) serveFile(ctx context.Context, w http.ResponseWriter, r *http.Request, resolvedPath ipath.Resolved, contentPath ipath.Path, file files.File, fileContentType string, begin time.Time) bool { +func (i *handler) serveFile(ctx context.Context, w http.ResponseWriter, r *http.Request, resolvedPath path.ImmutablePath, rq *requestData, fileSize int64, fileBytes io.ReadCloser, isSymlink bool, returnRangeStartsAtZero bool, fileContentType string) bool { _, span := spanTrace(ctx, "Handler.ServeFile", trace.WithAttributes(attribute.String("path", resolvedPath.String()))) defer span.End() // Set Cache-Control and read optional Last-Modified time - modtime := addCacheControlHeaders(w, r, contentPath, resolvedPath.Cid(), "") + modtime := addCacheControlHeaders(w, r, rq.contentPath, rq.ttl, rq.lastMod, resolvedPath.RootCid(), "") // Set Content-Disposition - name := addContentDispositionHeader(w, r, contentPath) + name := addContentDispositionHeader(w, r, rq.contentPath) - // Prepare size value for Content-Length HTTP header (set inside of http.ServeContent) - size, err := file.Size() - if err != nil { - http.Error(w, "cannot serve files with unknown sizes", http.StatusBadGateway) - return false - } - - if size == 0 { + if fileSize == 0 { // We override null files to 200 to avoid issues with fragment caching reverse proxies. // Also whatever you are asking for, it's cheaper to just give you the complete file (nothing). // TODO: remove this if clause once https://github.com/golang/go/issues/54794 is fixed in two latest releases of go @@ -45,16 +38,11 @@ func (i *handler) serveFile(ctx context.Context, w http.ResponseWriter, r *http. return true } - // Lazy seeker enables efficient range-requests and HTTP HEAD responses - content := &lazySeeker{ - size: size, - reader: file, - } - + var content io.Reader = fileBytes // Calculate deterministic value for Content-Type HTTP header // (we prefer to do it here, rather than using implicit sniffing in http.ServeContent) var ctype string - if _, isSymlink := file.(*files.Symlink); isSymlink { + if isSymlink { // We should be smarter about resolving symlinks but this is the // "most correct" we can be without doing that. ctype = "inode/symlink" @@ -63,21 +51,24 @@ func (i *handler) serveFile(ctx context.Context, w http.ResponseWriter, r *http. if ctype == "" { ctype = fileContentType } - if ctype == "" { + if ctype == "" && returnRangeStartsAtZero { // uses https://github.com/gabriel-vasile/mimetype library to determine the content type. 
// Fixes https://github.com/ipfs/kubo/issues/7252 - mimeType, err := mimetype.DetectReader(content) + + // We read from a TeeReader into a buffer and then put the buffer in front of the original reader to + // simulate the behavior of being able to read from the start and then seek back to the beginning while + // only having a Reader and not a ReadSeeker + var buf bytes.Buffer + tr := io.TeeReader(fileBytes, &buf) + + mimeType, err := mimetype.DetectReader(tr) if err != nil { http.Error(w, fmt.Sprintf("cannot detect content-type: %s", err.Error()), http.StatusInternalServerError) return false } ctype = mimeType.String() - _, err = content.Seek(0, io.SeekStart) - if err != nil { - http.Error(w, "seeker can't seek", http.StatusInternalServerError) - return false - } + content = io.MultiReader(&buf, fileBytes) } // Strip the encoding from the HTML Content-Type header and let the // browser figure it out. @@ -93,12 +84,12 @@ func (i *handler) serveFile(ctx context.Context, w http.ResponseWriter, r *http. // ServeContent will take care of // If-None-Match+Etag, Content-Length and range requests - _, dataSent, _ := serveContent(w, r, name, modtime, content) + _, dataSent, _ := serveContent(w, r, modtime, fileSize, content) // Was response successful? if dataSent { // Update metrics - i.unixfsFileGetMetric.WithLabelValues(contentPath.Namespace()).Observe(time.Since(begin).Seconds()) + i.unixfsFileGetMetric.WithLabelValues(rq.contentPath.Namespace()).Observe(time.Since(rq.begin).Seconds()) } return dataSent diff --git a/gateway/hostname.go b/gateway/hostname.go index 758d8499c..6b485f0b4 100644 --- a/gateway/hostname.go +++ b/gateway/hostname.go @@ -165,15 +165,32 @@ func NewHostnameHandler(c Config, backend IPFSBackend, next http.Handler) http.H // can be loaded from a subdomain gateway with a wildcard // TLS cert if represented as a single DNS label: // https://my-v--long-example-com.ipns.dweb.link - if ns == "ipns" && !strings.Contains(rootID, ".") { - // if there is no TXT recordfor rootID - if !hasDNSLinkRecord(r.Context(), backend, rootID) { - // my-v--long-example-com → my.v-long.example.com - dnslinkFQDN := toDNSLinkFQDN(rootID) - if hasDNSLinkRecord(r.Context(), backend, dnslinkFQDN) { - // update path prefix to use real FQDN with DNSLink - pathPrefix = "/ipns/" + dnslinkFQDN - } + if ns == "ipns" && !strings.Contains(rootID, ".") && strings.Contains(rootID, "-") { + // If there are no '.' but '-' is present in rootID, we most + // likely have an inlined DNSLink (like my-v--long-example-com) + + // We un-inline and check for DNSLink presence on domain with '.' + // first to minimize the amount of DNS lookups: + // my-v--long-example-com → my.v-long.example.com + dnslinkFQDN := UninlineDNSLink(rootID) + + // Does _dnslink.my.v-long.example.com exist? + if hasDNSLinkRecord(r.Context(), backend, dnslinkFQDN) { + // Un-inlined DNS name has a valid DNSLink record. + // Update path prefix to use un-inlined FQDN in gateway processing. + pathPrefix = "/ipns/" + dnslinkFQDN // → /ipns/my.v-long.example.com + + } else if !hasDNSLinkRecord(r.Context(), backend, rootID) { + // Inspected _dnslink.my-v--long-example-com as a + // fallback, but it had no DNSLink record either. + + // At this point it is more likely the un-inlined + // dnslinkFQDN is what the end user wanted to load, so + // we switch to that. This ensures the error message + // about missing DNSLink will use the un-inlined FQDN, + // and not the inlined one. 
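+				// For example, a request for my-v--long-example-com.ipns.dweb.link whose
+				// un-inlined form my.v-long.example.com has no DNSLink record either will
+				// be reported against my.v-long.example.com rather than the inlined label.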
+ pathPrefix = "/ipns/" + dnslinkFQDN + } } } @@ -306,22 +323,59 @@ func isHTTPSRequest(r *http.Request) bool { // Converts a FQDN to DNS-safe representation that fits in 63 characters: // my.v-long.example.com → my-v--long-example-com -func toDNSLinkDNSLabel(fqdn string) (dnsLabel string, err error) { +// InlineDNSLink implements specification from https://specs.ipfs.tech/http-gateways/subdomain-gateway/#host-request-header +func InlineDNSLink(fqdn string) (dnsLabel string, err error) { + /* What follows is an optimized version this three-liner: dnsLabel = strings.ReplaceAll(fqdn, "-", "--") dnsLabel = strings.ReplaceAll(dnsLabel, ".", "-") if len(dnsLabel) > dnsLabelMaxLength { return "", fmt.Errorf("DNSLink representation incompatible with DNS label length limit of 63: %s", dnsLabel) } return dnsLabel, nil + */ + result := make([]byte, 0, len(fqdn)) + for i := 0; i < len(fqdn); i++ { + char := fqdn[i] + if char == '-' { + result = append(result, '-', '-') + } else if char == '.' { + result = append(result, '-') + } else { + result = append(result, char) + } + } + if len(result) > dnsLabelMaxLength { + return "", fmt.Errorf("inlined DNSLink incompatible with DNS label length limit of 63: %q", result) + } + return string(result), nil } // Converts a DNS-safe representation of DNSLink FQDN to real FQDN: // my-v--long-example-com → my.v-long.example.com -func toDNSLinkFQDN(dnsLabel string) (fqdn string) { +// UninlineDNSLink implements specification from https://specs.ipfs.tech/http-gateways/subdomain-gateway/#host-request-header +func UninlineDNSLink(dnsLabel string) (fqdn string) { + /* What follows is an optimized version this three-liner: fqdn = strings.ReplaceAll(dnsLabel, "--", "@") // @ placeholder is unused in DNS labels fqdn = strings.ReplaceAll(fqdn, "-", ".") fqdn = strings.ReplaceAll(fqdn, "@", "-") return fqdn + */ + result := make([]byte, 0, len(dnsLabel)) + for i := 0; i < len(dnsLabel); i++ { + if dnsLabel[i] == '-' { + if i+1 < len(dnsLabel) && dnsLabel[i+1] == '-' { + // Handle '--' by appending a single '-' + result = append(result, '-') + i++ + } else { + // Handle single '-' by appending '.' + result = append(result, '.') + } + } else { + result = append(result, dnsLabel[i]) + } + } + return string(result) } // Converts a hostname/path to a subdomain-based URL, if applicable. @@ -402,7 +456,7 @@ func toSubdomainURL(hostname, path string, r *http.Request, inlineDNSLink bool, // e.g. 
when ipfs-companion extension passes value from subdomain gateway // for further normalization: https://github.com/ipfs/ipfs-companion/issues/1278#issuecomment-1724550623 if ns == "ipns" && !strings.Contains(rootID, ".") && strings.Contains(rootID, "-") { - dnsLinkFqdn := toDNSLinkFQDN(rootID) // my-v--long-example-com → my.v-long.example.com + dnsLinkFqdn := UninlineDNSLink(rootID) // my-v--long-example-com → my.v-long.example.com if hasDNSLinkRecord(r.Context(), backend, dnsLinkFqdn) { // update path prefix to use real FQDN with DNSLink rootID = dnsLinkFqdn @@ -425,7 +479,7 @@ func toSubdomainURL(hostname, path string, r *http.Request, inlineDNSLink bool, // https://my-v--long-example-com.ipns.dweb.link if hasDNSLinkRecord(r.Context(), backend, rootID) { // my.v-long.example.com → my-v--long-example-com - dnsLabel, err := toDNSLinkDNSLabel(rootID) + dnsLabel, err := InlineDNSLink(rootID) if err != nil { return "", err } diff --git a/gateway/hostname_test.go b/gateway/hostname_test.go index f7cee35a2..1a150facb 100644 --- a/gateway/hostname_test.go +++ b/gateway/hostname_test.go @@ -5,9 +5,10 @@ import ( "fmt" "net/http" "net/http/httptest" + "strings" "testing" - path "github.com/ipfs/boxo/path" + "github.com/ipfs/boxo/path" cid "github.com/ipfs/go-cid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -20,8 +21,8 @@ func TestToSubdomainURL(t *testing.T) { testCID, err := cid.Decode("bafkqaglimvwgy3zakrsxg5cun5jxkyten5wwc2lokvjeycq") require.NoError(t, err) - backend.namesys["/ipns/dnslink.long-name.example.com"] = path.FromString(testCID.String()) - backend.namesys["/ipns/dnslink.too-long.f1siqrebi3vir8sab33hu5vcy008djegvay6atmz91ojesyjs8lx350b7y7i1nvyw2haytfukfyu2f2x4tocdrfa0zgij6p4zpl4u5o.example.com"] = path.FromString(testCID.String()) + backend.namesys["/ipns/dnslink.long-name.example.com"] = newMockNamesysItem(path.FromCid(testCID), 0) + backend.namesys["/ipns/dnslink.too-long.f1siqrebi3vir8sab33hu5vcy008djegvay6atmz91ojesyjs8lx350b7y7i1nvyw2haytfukfyu2f2x4tocdrfa0zgij6p4zpl4u5o.example.com"] = newMockNamesysItem(path.FromCid(testCID), 0) httpRequest := httptest.NewRequest("GET", "http://127.0.0.1:8080", nil) httpsRequest := httptest.NewRequest("GET", "https://https-request-stub.example.com", nil) httpsProxiedRequest := httptest.NewRequest("GET", "http://proxied-https-request-stub.example.com", nil) @@ -82,10 +83,13 @@ func TestToDNSLinkDNSLabel(t *testing.T) { err error }{ {"dnslink.long-name.example.com", "dnslink-long--name-example-com", nil}, - {"dnslink.too-long.f1siqrebi3vir8sab33hu5vcy008djegvay6atmz91ojesyjs8lx350b7y7i1nvyw2haytfukfyu2f2x4tocdrfa0zgij6p4zpl4u5o.example.com", "", errors.New("DNSLink representation incompatible with DNS label length limit of 63: dnslink-too--long-f1siqrebi3vir8sab33hu5vcy008djegvay6atmz91ojesyjs8lx350b7y7i1nvyw2haytfukfyu2f2x4tocdrfa0zgij6p4zpl4u5o-example-com")}, + {"singlelabel", "singlelabel", nil}, + {"example.com", "example-com", nil}, + {"en.wikipedia-on-ipfs.org", "en-wikipedia--on--ipfs-org", nil}, + {"dnslink.too-long.f1siqrebi3vir8sab33hu5vcy008djegvay6atmz91ojesyjs8lx350b7y7i1nvyw2haytfukfyu2f2x4tocdrfa0zgij6p4zpl4u5o.example.com", "", errors.New(`inlined DNSLink incompatible with DNS label length limit of 63: "dnslink-too--long-f1siqrebi3vir8sab33hu5vcy008djegvay6atmz91ojesyjs8lx350b7y7i1nvyw2haytfukfyu2f2x4tocdrfa0zgij6p4zpl4u5o-example-com"`)}, } { t.Run(test.in, func(t *testing.T) { - out, err := toDNSLinkDNSLabel(test.in) + out, err := InlineDNSLink(test.in) require.Equal(t, test.out, out) 
require.Equal(t, test.err, err) }) @@ -99,11 +103,14 @@ func TestToDNSLinkFQDN(t *testing.T) { out string }{ {"singlelabel", "singlelabel"}, + {"no--tld", "no-tld"}, + {"example.com", "example.com"}, {"docs-ipfs-tech", "docs.ipfs.tech"}, + {"en-wikipedia--on--ipfs-org", "en.wikipedia-on-ipfs.org"}, {"dnslink-long--name-example-com", "dnslink.long-name.example.com"}, } { t.Run(test.in, func(t *testing.T) { - out := toDNSLinkFQDN(test.in) + out := UninlineDNSLink(test.in) require.Equal(t, test.out, out) }) } @@ -305,3 +312,55 @@ func TestKnownSubdomainDetails(t *testing.T) { }) } } + +const testInlinedDNSLinkA = "example-com" +const testInlinedDNSLinkB = "docs-ipfs-tech" +const testInlinedDNSLinkC = "en-wikipedia--on--ipfs-org" +const testDNSLinkA = "example.com" +const testDNSLinkB = "docs.ipfs.tech" +const testDNSLinkC = "en.wikipedia-on-ipfs.org" + +func inlineDNSLinkSimple(fqdn string) (dnsLabel string, err error) { + dnsLabel = strings.ReplaceAll(fqdn, "-", "--") + dnsLabel = strings.ReplaceAll(dnsLabel, ".", "-") + if len(dnsLabel) > dnsLabelMaxLength { + return "", fmt.Errorf("inlined DNSLink incompatible with DNS label length limit of 63: %q", dnsLabel) + } + return dnsLabel, nil +} +func uninlineDNSLinkSimple(dnsLabel string) (fqdn string) { + fqdn = strings.ReplaceAll(dnsLabel, "--", "@") // @ placeholder is unused in DNS labels + fqdn = strings.ReplaceAll(fqdn, "-", ".") + fqdn = strings.ReplaceAll(fqdn, "@", "-") + return fqdn +} + +func BenchmarkUninlineDNSLinkSimple(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = uninlineDNSLinkSimple(testInlinedDNSLinkA) + _ = uninlineDNSLinkSimple(testInlinedDNSLinkB) + _ = uninlineDNSLinkSimple(testInlinedDNSLinkC) + } +} +func BenchmarkUninlineDNSLink(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = UninlineDNSLink(testInlinedDNSLinkA) + _ = UninlineDNSLink(testInlinedDNSLinkB) + _ = UninlineDNSLink(testInlinedDNSLinkC) + } +} + +func BenchmarkInlineDNSLinkSimple(b *testing.B) { + for i := 0; i < b.N; i++ { + _, _ = inlineDNSLinkSimple(testDNSLinkA) + _, _ = inlineDNSLinkSimple(testDNSLinkB) + _, _ = inlineDNSLinkSimple(testDNSLinkC) + } +} +func BenchmarkInlineDNSLink(b *testing.B) { + for i := 0; i < b.N; i++ { + _, _ = InlineDNSLink(testDNSLinkA) + _, _ = InlineDNSLink(testDNSLinkB) + _, _ = InlineDNSLink(testDNSLinkC) + } +} diff --git a/gateway/metrics.go b/gateway/metrics.go index 69e81425f..df8639956 100644 --- a/gateway/metrics.go +++ b/gateway/metrics.go @@ -6,8 +6,8 @@ import ( "io" "time" - "github.com/ipfs/boxo/coreiface/path" "github.com/ipfs/boxo/files" + "github.com/ipfs/boxo/path" "github.com/ipfs/go-cid" prometheus "github.com/prometheus/client_golang/prometheus" "go.opentelemetry.io/otel" @@ -60,7 +60,7 @@ func (b *ipfsBackendWithMetrics) updateBackendCallMetric(name string, err error, } } -func (b *ipfsBackendWithMetrics) Get(ctx context.Context, path ImmutablePath, ranges ...ByteRange) (ContentPathMetadata, *GetResponse, error) { +func (b *ipfsBackendWithMetrics) Get(ctx context.Context, path path.ImmutablePath, ranges ...ByteRange) (ContentPathMetadata, *GetResponse, error) { begin := time.Now() name := "IPFSBackend.Get" ctx, span := spanTrace(ctx, name, trace.WithAttributes(attribute.String("path", path.String()), attribute.Int("ranges", len(ranges)))) @@ -72,7 +72,7 @@ func (b *ipfsBackendWithMetrics) Get(ctx context.Context, path ImmutablePath, ra return md, f, err } -func (b *ipfsBackendWithMetrics) GetAll(ctx context.Context, path ImmutablePath) (ContentPathMetadata, files.Node, error) { +func (b 
*ipfsBackendWithMetrics) GetAll(ctx context.Context, path path.ImmutablePath) (ContentPathMetadata, files.Node, error) { begin := time.Now() name := "IPFSBackend.GetAll" ctx, span := spanTrace(ctx, name, trace.WithAttributes(attribute.String("path", path.String()))) @@ -84,7 +84,7 @@ func (b *ipfsBackendWithMetrics) GetAll(ctx context.Context, path ImmutablePath) return md, n, err } -func (b *ipfsBackendWithMetrics) GetBlock(ctx context.Context, path ImmutablePath) (ContentPathMetadata, files.File, error) { +func (b *ipfsBackendWithMetrics) GetBlock(ctx context.Context, path path.ImmutablePath) (ContentPathMetadata, files.File, error) { begin := time.Now() name := "IPFSBackend.GetBlock" ctx, span := spanTrace(ctx, name, trace.WithAttributes(attribute.String("path", path.String()))) @@ -96,7 +96,7 @@ func (b *ipfsBackendWithMetrics) GetBlock(ctx context.Context, path ImmutablePat return md, n, err } -func (b *ipfsBackendWithMetrics) Head(ctx context.Context, path ImmutablePath) (ContentPathMetadata, files.Node, error) { +func (b *ipfsBackendWithMetrics) Head(ctx context.Context, path path.ImmutablePath) (ContentPathMetadata, *HeadResponse, error) { begin := time.Now() name := "IPFSBackend.Head" ctx, span := spanTrace(ctx, name, trace.WithAttributes(attribute.String("path", path.String()))) @@ -108,7 +108,7 @@ func (b *ipfsBackendWithMetrics) Head(ctx context.Context, path ImmutablePath) ( return md, n, err } -func (b *ipfsBackendWithMetrics) ResolvePath(ctx context.Context, path ImmutablePath) (ContentPathMetadata, error) { +func (b *ipfsBackendWithMetrics) ResolvePath(ctx context.Context, path path.ImmutablePath) (ContentPathMetadata, error) { begin := time.Now() name := "IPFSBackend.ResolvePath" ctx, span := spanTrace(ctx, name, trace.WithAttributes(attribute.String("path", path.String()))) @@ -120,7 +120,7 @@ func (b *ipfsBackendWithMetrics) ResolvePath(ctx context.Context, path Immutable return md, err } -func (b *ipfsBackendWithMetrics) GetCAR(ctx context.Context, path ImmutablePath, params CarParams) (ContentPathMetadata, io.ReadCloser, error) { +func (b *ipfsBackendWithMetrics) GetCAR(ctx context.Context, path path.ImmutablePath, params CarParams) (ContentPathMetadata, io.ReadCloser, error) { begin := time.Now() name := "IPFSBackend.GetCAR" ctx, span := spanTrace(ctx, name, trace.WithAttributes(attribute.String("path", path.String()))) @@ -155,16 +155,16 @@ func (b *ipfsBackendWithMetrics) GetIPNSRecord(ctx context.Context, cid cid.Cid) return r, err } -func (b *ipfsBackendWithMetrics) ResolveMutable(ctx context.Context, path path.Path) (ImmutablePath, error) { +func (b *ipfsBackendWithMetrics) ResolveMutable(ctx context.Context, path path.Path) (path.ImmutablePath, time.Duration, time.Time, error) { begin := time.Now() name := "IPFSBackend.ResolveMutable" ctx, span := spanTrace(ctx, name, trace.WithAttributes(attribute.String("path", path.String()))) defer span.End() - p, err := b.backend.ResolveMutable(ctx, path) + p, ttl, lastMod, err := b.backend.ResolveMutable(ctx, path) b.updateBackendCallMetric(name, err, begin) - return p, err + return p, ttl, lastMod, err } func (b *ipfsBackendWithMetrics) GetDNSLinkRecord(ctx context.Context, fqdn string) (path.Path, error) { diff --git a/gateway/serve_http_content.go b/gateway/serve_http_content.go new file mode 100644 index 000000000..2bb27ae04 --- /dev/null +++ b/gateway/serve_http_content.go @@ -0,0 +1,472 @@ +package gateway + +import ( + "errors" + "fmt" + "io" + "net/http" + "net/textproto" + "strconv" + "strings" + "time" +) + +// 
errNoOverlap is returned by httpServeContent's parseRange if first-byte-pos of
+// all of the byte-range-spec values is greater than the content size.
+var errNoOverlap = errors.New("invalid range: failed to overlap")
+
+func headerGetExact(h http.Header, key string) string {
+	if v := h[key]; len(v) > 0 {
+		return v[0]
+	}
+	return ""
+}
+
+// httpServeContent replies to the request using the content in the
+// provided Reader.
+//
+// The main benefit of httpServeContent over io.Copy is that it handles Range requests properly,
+// as well as If-Match, If-Unmodified-Since, If-None-Match, If-Modified-Since, and If-Range requests.
+//
+// If modtime is not the zero time or Unix epoch, httpServeContent
+// includes it in a Last-Modified header in the response. If the
+// request includes an If-Modified-Since header, httpServeContent uses
+// modtime to decide whether the content needs to be sent at all.
+//
+// If the caller has set w's ETag header formatted per RFC 7232, section 2.3,
+// httpServeContent uses it to handle requests using If-Match, If-None-Match, or If-Range.
+//
+// Notable differences from http.ServeContent:
+// 1. Takes an io.Reader instead of an io.ReadSeeker
+// 2. Requires the size to be passed in explicitly instead of discovered via Seeker behavior
+// 3. Only handles a single HTTP Range; if multiple are requested, it returns the first
+// 4. The passed io.Reader must start at wherever the HTTP Range Request will start
+// 5. Requires the Content-Type header to already be set
+// 6. Does not require the name to be passed in for content sniffing
+// 7. content may be nil for HEAD requests
+func httpServeContent(w http.ResponseWriter, r *http.Request, modtime time.Time, size int64, content io.Reader) {
+	setLastModified(w, modtime)
+	done, rangeReq := checkPreconditions(w, r, modtime)
+	if done {
+		return
+	}
+
+	code := http.StatusOK
+
+	// handle Content-Range header.
+	sendSize := size
+	if size >= 0 {
+		ranges, err := parseRange(rangeReq, size)
+		if err != nil {
+			if err == errNoOverlap {
+				w.Header().Set("Content-Range", fmt.Sprintf("bytes */%d", size))
+			}
+			http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
+			return
+		}
+		if sumRangesSize(ranges) > size {
+			// The total number of bytes in all the ranges
+			// is larger than the size of the file by
+			// itself, so this is probably an attack, or a
+			// dumb client. Ignore the range request.
+			ranges = nil
+		}
+
+		// We only support a single range request, if more than one is submitted we just send back the first
+		if len(ranges) > 0 {
+			ra := ranges[0]
+			// RFC 7233, Section 4.1:
+			// "If a single part is being transferred, the server
+			// generating the 206 response MUST generate a
+			// Content-Range header field, describing what range
+			// of the selected representation is enclosed, and a
+			// payload consisting of the range.
+			// ...
+			// A server MUST NOT generate a multipart response to
+			// a request for a single range, since a client that
+			// does not request multiple parts might not support
+			// multipart responses."
+
+			sendSize = ra.length
+			code = http.StatusPartialContent
+			w.Header().Set("Content-Range", ra.contentRange(size))
+		}
+
+		w.Header().Set("Accept-Ranges", "bytes")
+		if w.Header().Get("Content-Encoding") == "" {
+			w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10))
+		}
+	}
+
+	w.WriteHeader(code)
+
+	if r.Method != "HEAD" {
+		var sendContent io.Reader = content
+		io.CopyN(w, sendContent, sendSize)
+	}
+}
+
+// scanETag determines if a syntactically valid ETag is present at s.
If so, +// the ETag and remaining text after consuming ETag is returned. Otherwise, +// it returns "", "". +func scanETag(s string) (etag string, remain string) { + s = textproto.TrimString(s) + start := 0 + if strings.HasPrefix(s, "W/") { + start = 2 + } + if len(s[start:]) < 2 || s[start] != '"' { + return "", "" + } + // ETag is either W/"text" or "text". + // See RFC 7232 2.3. + for i := start + 1; i < len(s); i++ { + c := s[i] + switch { + // Character values allowed in ETags. + case c == 0x21 || c >= 0x23 && c <= 0x7E || c >= 0x80: + case c == '"': + return s[:i+1], s[i+1:] + default: + return "", "" + } + } + return "", "" +} + +// etagStrongMatch reports whether a and b match using strong ETag comparison. +// Assumes a and b are valid ETags. +func etagStrongMatch(a, b string) bool { + return a == b && a != "" && a[0] == '"' +} + +// etagWeakMatch reports whether a and b match using weak ETag comparison. +// Assumes a and b are valid ETags. +func etagWeakMatch(a, b string) bool { + return strings.TrimPrefix(a, "W/") == strings.TrimPrefix(b, "W/") +} + +// condResult is the result of an HTTP request precondition check. +// See https://tools.ietf.org/html/rfc7232 section 3. +type condResult int + +const ( + condNone condResult = iota + condTrue + condFalse +) + +func checkIfMatch(w http.ResponseWriter, r *http.Request) condResult { + im := r.Header.Get("If-Match") + if im == "" { + return condNone + } + for { + im = textproto.TrimString(im) + if len(im) == 0 { + break + } + if im[0] == ',' { + im = im[1:] + continue + } + if im[0] == '*' { + return condTrue + } + etag, remain := scanETag(im) + if etag == "" { + break + } + if etagStrongMatch(etag, headerGetExact(w.Header(), "Etag")) { + return condTrue + } + im = remain + } + + return condFalse +} + +func checkIfUnmodifiedSince(r *http.Request, modtime time.Time) condResult { + ius := r.Header.Get("If-Unmodified-Since") + if ius == "" || isZeroTime(modtime) { + return condNone + } + t, err := http.ParseTime(ius) + if err != nil { + return condNone + } + + // The Last-Modified header truncates sub-second precision so + // the modtime needs to be truncated too. + modtime = modtime.Truncate(time.Second) + if modtime.Before(t) || modtime.Equal(t) { + return condTrue + } + return condFalse +} + +func checkIfNoneMatch(w http.ResponseWriter, r *http.Request) condResult { + inm := headerGetExact(r.Header, "If-None-Match") + if inm == "" { + return condNone + } + buf := inm + for { + buf = textproto.TrimString(buf) + if len(buf) == 0 { + break + } + if buf[0] == ',' { + buf = buf[1:] + continue + } + if buf[0] == '*' { + return condFalse + } + etag, remain := scanETag(buf) + if etag == "" { + break + } + if etagWeakMatch(etag, headerGetExact(w.Header(), "Etag")) { + return condFalse + } + buf = remain + } + return condTrue +} + +func checkIfModifiedSince(r *http.Request, modtime time.Time) condResult { + if r.Method != "GET" && r.Method != "HEAD" { + return condNone + } + ims := r.Header.Get("If-Modified-Since") + if ims == "" || isZeroTime(modtime) { + return condNone + } + t, err := http.ParseTime(ims) + if err != nil { + return condNone + } + // The Last-Modified header truncates sub-second precision so + // the modtime needs to be truncated too. 
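+	// For example, a stored modtime of 12:00:00.500 must compare equal to an
+	// If-Modified-Since value of 12:00:00, since Last-Modified only carries seconds.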
+ modtime = modtime.Truncate(time.Second) + if modtime.Before(t) || modtime.Equal(t) { + return condFalse + } + return condTrue +} + +func checkIfRange(w http.ResponseWriter, r *http.Request, modtime time.Time) condResult { + if r.Method != "GET" && r.Method != "HEAD" { + return condNone + } + ir := headerGetExact(r.Header, "If-Range") + if ir == "" { + return condNone + } + etag, _ := scanETag(ir) + if etag != "" { + if etagStrongMatch(etag, w.Header().Get("Etag")) { + return condTrue + } else { + return condFalse + } + } + // The If-Range value is typically the ETag value, but it may also be + // the modtime date. See golang.org/issue/8367. + if modtime.IsZero() { + return condFalse + } + t, err := http.ParseTime(ir) + if err != nil { + return condFalse + } + if t.Unix() == modtime.Unix() { + return condTrue + } + return condFalse +} + +// isZeroTime reports whether t is obviously unspecified (either zero or Unix()=0). +func isZeroTime(t time.Time) bool { + return t.IsZero() || t.Equal(unixEpochTime) +} + +func setLastModified(w http.ResponseWriter, modtime time.Time) { + if !isZeroTime(modtime) { + w.Header().Set("Last-Modified", modtime.UTC().Format(http.TimeFormat)) + } +} + +func writeNotModified(w http.ResponseWriter) { + // RFC 7232 section 4.1: + // a sender SHOULD NOT generate representation metadata other than the + // above listed fields unless said metadata exists for the purpose of + // guiding cache updates (e.g., Last-Modified might be useful if the + // response does not have an ETag field). + h := w.Header() + delete(h, "Content-Type") + delete(h, "Content-Length") + delete(h, "Content-Encoding") + if h.Get("Etag") != "" { + delete(h, "Last-Modified") + } + w.WriteHeader(http.StatusNotModified) +} + +// checkPreconditions evaluates request preconditions and reports whether a precondition +// resulted in sending StatusNotModified or StatusPreconditionFailed. +func checkPreconditions(w http.ResponseWriter, r *http.Request, modtime time.Time) (done bool, rangeHeader string) { + // This function carefully follows RFC 7232 section 6. + ch := checkIfMatch(w, r) + if ch == condNone { + ch = checkIfUnmodifiedSince(r, modtime) + } + if ch == condFalse { + w.WriteHeader(http.StatusPreconditionFailed) + return true, "" + } + switch checkIfNoneMatch(w, r) { + case condFalse: + if r.Method == "GET" || r.Method == "HEAD" { + writeNotModified(w) + return true, "" + } else { + w.WriteHeader(http.StatusPreconditionFailed) + return true, "" + } + case condNone: + if checkIfModifiedSince(r, modtime) == condFalse { + writeNotModified(w) + return true, "" + } + } + + rangeHeader = headerGetExact(r.Header, "Range") + if rangeHeader != "" && checkIfRange(w, r, modtime) == condFalse { + rangeHeader = "" + } + return false, rangeHeader +} + +// httpRange specifies the byte range to be sent to the client. +type httpRange struct { + start, length int64 +} + +func (r httpRange) contentRange(size int64) string { + return fmt.Sprintf("bytes %d-%d/%d", r.start, r.start+r.length-1, size) +} + +// parseRange parses a Range header string as per RFC 7233. +// errNoOverlap is returned if none of the ranges overlap. 
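+// Illustrative values for a 10000-byte body, following the implementation below:
+//
+//	"bytes=0-499"  → one range {start: 0, length: 500}
+//	"bytes=-500"   → one range {start: 9500, length: 500} (suffix of the last 500 bytes)
+//	"bytes=9500-"  → one range {start: 9500, length: 500}
+//	"bytes=10000-" → errNoOverlap (first-byte-pos is past the end of the content)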
+func parseRange(s string, size int64) ([]httpRange, error) {
+	if s == "" {
+		return nil, nil // header not present
+	}
+	const b = "bytes="
+	if !strings.HasPrefix(s, b) {
+		return nil, errors.New("invalid range")
+	}
+	var ranges []httpRange
+	noOverlap := false
+	for _, ra := range strings.Split(s[len(b):], ",") {
+		ra = textproto.TrimString(ra)
+		if ra == "" {
+			continue
+		}
+		start, end, ok := strings.Cut(ra, "-")
+		if !ok {
+			return nil, errors.New("invalid range")
+		}
+		start, end = textproto.TrimString(start), textproto.TrimString(end)
+		var r httpRange
+		if start == "" {
+			// If no start is specified, end specifies the
+			// range start relative to the end of the file,
+			// and we are dealing with <suffix-length>
+			// which has to be a non-negative integer as per
+			// RFC 7233 Section 2.1 "Byte-Ranges".
+			if end == "" || end[0] == '-' {
+				return nil, errors.New("invalid range")
+			}
+			i, err := strconv.ParseInt(end, 10, 64)
+			if i < 0 || err != nil {
+				return nil, errors.New("invalid range")
+			}
+			if i > size {
+				i = size
+			}
+			r.start = size - i
+			r.length = size - r.start
+		} else {
+			i, err := strconv.ParseInt(start, 10, 64)
+			if err != nil || i < 0 {
+				return nil, errors.New("invalid range")
+			}
+			if i >= size {
+				// If the range begins after the size of the content,
+				// then it does not overlap.
+				noOverlap = true
+				continue
+			}
+			r.start = i
+			if end == "" {
+				// If no end is specified, range extends to end of the file.
+				r.length = size - r.start
+			} else {
+				i, err := strconv.ParseInt(end, 10, 64)
+				if err != nil || r.start > i {
+					return nil, errors.New("invalid range")
+				}
+				if i >= size {
+					i = size - 1
+				}
+				r.length = i - r.start + 1
+			}
+		}
+		ranges = append(ranges, r)
+	}
+	if noOverlap && len(ranges) == 0 {
+		// The specified ranges did not overlap with the content.
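+		// For example, "bytes=500-" against a 200-byte body ends up here and is
+		// reported upstream as 416 Requested Range Not Satisfiable.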
+ return nil, errNoOverlap + } + return ranges, nil +} + +func sumRangesSize(ranges []httpRange) (size int64) { + for _, ra := range ranges { + size += ra.length + } + return +} + +// seekToStartOfFirstRange seeks to the start of the first Range if the request is an HTTP Range Request +func (i *handler) seekToStartOfFirstRange(w http.ResponseWriter, r *http.Request, data io.Seeker) bool { + rangeHeader := r.Header.Get("Range") + if rangeHeader != "" { + ranges, err := parseRangeWithoutLength(rangeHeader) + if err != nil { + i.webError(w, r, fmt.Errorf("invalid range request: %w", err), http.StatusBadRequest) + return false + } + if len(ranges) > 0 { + ra := &ranges[0] + err = seekToRangeStart(data, ra) + if err != nil { + i.webError(w, r, fmt.Errorf("could not seek to location in range request: %w", err), http.StatusBadRequest) + return false + } + } + } + return true +} + +func seekToRangeStart(data io.Seeker, ra *ByteRange) error { + if ra != nil && ra.From != 0 { + if _, err := data.Seek(int64(ra.From), io.SeekStart); err != nil { + return err + } + } + return nil +} diff --git a/gateway/testdata/pretty-404.car b/gateway/testdata/pretty-404.car deleted file mode 100644 index 3adec2904..000000000 Binary files a/gateway/testdata/pretty-404.car and /dev/null differ diff --git a/gateway/utilities_test.go b/gateway/utilities_test.go index 27ba43a14..85153f808 100644 --- a/gateway/utilities_test.go +++ b/gateway/utilities_test.go @@ -11,14 +11,13 @@ import ( "regexp" "strings" "testing" + "time" "github.com/ipfs/boxo/blockservice" - nsopts "github.com/ipfs/boxo/coreiface/options/namesys" - ipath "github.com/ipfs/boxo/coreiface/path" offline "github.com/ipfs/boxo/exchange/offline" "github.com/ipfs/boxo/files" "github.com/ipfs/boxo/namesys" - path "github.com/ipfs/boxo/path" + "github.com/ipfs/boxo/path" "github.com/ipfs/go-cid" carblockstore "github.com/ipld/go-car/v2/blockstore" "github.com/libp2p/go-libp2p/core/crypto" @@ -52,43 +51,60 @@ func mustDo(t *testing.T, req *http.Request) *http.Response { return res } -type mockNamesys map[string]path.Path +type mockNamesysItem struct { + path path.Path + ttl time.Duration +} + +func newMockNamesysItem(p path.Path, ttl time.Duration) *mockNamesysItem { + return &mockNamesysItem{path: p, ttl: ttl} +} -func (m mockNamesys) Resolve(ctx context.Context, name string, opts ...nsopts.ResolveOpt) (value path.Path, err error) { - cfg := nsopts.DefaultResolveOpts() +type mockNamesys map[string]*mockNamesysItem + +func (m mockNamesys) Resolve(ctx context.Context, p path.Path, opts ...namesys.ResolveOption) (result namesys.Result, err error) { + cfg := namesys.DefaultResolveOptions() for _, o := range opts { o(&cfg) } depth := cfg.Depth - if depth == nsopts.UnlimitedDepth { + if depth == namesys.UnlimitedDepth { // max uint depth = ^uint(0) } + var ( + value path.Path + ttl time.Duration + ) + name := path.SegmentsToString(p.Segments()[:2]...) for strings.HasPrefix(name, "/ipns/") { if depth == 0 { - return value, namesys.ErrResolveRecursion + return namesys.Result{Path: value, TTL: ttl}, namesys.ErrResolveRecursion } depth-- - var ok bool - value, ok = m[name] + v, ok := m[name] if !ok { - return "", namesys.ErrResolveFailed + return namesys.Result{}, namesys.ErrResolveFailed } + value = v.path + ttl = v.ttl name = value.String() } - return value, nil + + value, err = path.Join(value, p.Segments()[2:]...) 
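+	// For example, resolving /ipns/example.net/foo first maps /ipns/example.net to its
+	// recorded path and then re-joins the trailing "foo" segment before returning.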
+ return namesys.Result{Path: value, TTL: ttl}, err } -func (m mockNamesys) ResolveAsync(ctx context.Context, name string, opts ...nsopts.ResolveOpt) <-chan namesys.Result { - out := make(chan namesys.Result, 1) - v, err := m.Resolve(ctx, name, opts...) - out <- namesys.Result{Path: v, Err: err} +func (m mockNamesys) ResolveAsync(ctx context.Context, p path.Path, opts ...namesys.ResolveOption) <-chan namesys.AsyncResult { + out := make(chan namesys.AsyncResult, 1) + res, err := m.Resolve(ctx, p, opts...) + out <- namesys.AsyncResult{Path: res.Path, TTL: res.TTL, LastMod: res.LastMod, Err: err} close(out) return out } -func (m mockNamesys) Publish(ctx context.Context, name crypto.PrivKey, value path.Path, opts ...nsopts.PublishOption) error { +func (m mockNamesys) Publish(ctx context.Context, name crypto.PrivKey, value path.Path, opts ...namesys.PublishOption) error { return errors.New("not implemented for mockNamesys") } @@ -133,27 +149,27 @@ func newMockBackend(t *testing.T, fixturesFile string) (*mockBackend, cid.Cid) { }, cids[0] } -func (mb *mockBackend) Get(ctx context.Context, immutablePath ImmutablePath, ranges ...ByteRange) (ContentPathMetadata, *GetResponse, error) { +func (mb *mockBackend) Get(ctx context.Context, immutablePath path.ImmutablePath, ranges ...ByteRange) (ContentPathMetadata, *GetResponse, error) { return mb.gw.Get(ctx, immutablePath, ranges...) } -func (mb *mockBackend) GetAll(ctx context.Context, immutablePath ImmutablePath) (ContentPathMetadata, files.Node, error) { +func (mb *mockBackend) GetAll(ctx context.Context, immutablePath path.ImmutablePath) (ContentPathMetadata, files.Node, error) { return mb.gw.GetAll(ctx, immutablePath) } -func (mb *mockBackend) GetBlock(ctx context.Context, immutablePath ImmutablePath) (ContentPathMetadata, files.File, error) { +func (mb *mockBackend) GetBlock(ctx context.Context, immutablePath path.ImmutablePath) (ContentPathMetadata, files.File, error) { return mb.gw.GetBlock(ctx, immutablePath) } -func (mb *mockBackend) Head(ctx context.Context, immutablePath ImmutablePath) (ContentPathMetadata, files.Node, error) { +func (mb *mockBackend) Head(ctx context.Context, immutablePath path.ImmutablePath) (ContentPathMetadata, *HeadResponse, error) { return mb.gw.Head(ctx, immutablePath) } -func (mb *mockBackend) GetCAR(ctx context.Context, immutablePath ImmutablePath, params CarParams) (ContentPathMetadata, io.ReadCloser, error) { +func (mb *mockBackend) GetCAR(ctx context.Context, immutablePath path.ImmutablePath, params CarParams) (ContentPathMetadata, io.ReadCloser, error) { return mb.gw.GetCAR(ctx, immutablePath, params) } -func (mb *mockBackend) ResolveMutable(ctx context.Context, p ipath.Path) (ImmutablePath, error) { +func (mb *mockBackend) ResolveMutable(ctx context.Context, p path.Path) (path.ImmutablePath, time.Duration, time.Time, error) { return mb.gw.ResolveMutable(ctx, p) } @@ -161,44 +177,49 @@ func (mb *mockBackend) GetIPNSRecord(ctx context.Context, c cid.Cid) ([]byte, er return nil, routing.ErrNotSupported } -func (mb *mockBackend) GetDNSLinkRecord(ctx context.Context, hostname string) (ipath.Path, error) { +func (mb *mockBackend) GetDNSLinkRecord(ctx context.Context, hostname string) (path.Path, error) { if mb.namesys != nil { - p, err := mb.namesys.Resolve(ctx, "/ipns/"+hostname, nsopts.Depth(1)) + p, err := path.NewPath("/ipns/" + hostname) + if err != nil { + return nil, err + } + res, err := mb.namesys.Resolve(ctx, p, namesys.ResolveWithDepth(1)) if err == namesys.ErrResolveRecursion { err = nil } - return 
ipath.New(p.String()), err + p = res.Path + return p, err } return nil, errors.New("not implemented") } -func (mb *mockBackend) IsCached(ctx context.Context, p ipath.Path) bool { +func (mb *mockBackend) IsCached(ctx context.Context, p path.Path) bool { return mb.gw.IsCached(ctx, p) } -func (mb *mockBackend) ResolvePath(ctx context.Context, immutablePath ImmutablePath) (ContentPathMetadata, error) { +func (mb *mockBackend) ResolvePath(ctx context.Context, immutablePath path.ImmutablePath) (ContentPathMetadata, error) { return mb.gw.ResolvePath(ctx, immutablePath) } -func (mb *mockBackend) resolvePathNoRootsReturned(ctx context.Context, ip ipath.Path) (ipath.Resolved, error) { - var imPath ImmutablePath +func (mb *mockBackend) resolvePathNoRootsReturned(ctx context.Context, ip path.Path) (path.ImmutablePath, error) { + var imPath path.ImmutablePath var err error if ip.Mutable() { - imPath, err = mb.ResolveMutable(ctx, ip) + imPath, _, _, err = mb.ResolveMutable(ctx, ip) if err != nil { - return nil, err + return path.ImmutablePath{}, err } } else { - imPath, err = NewImmutablePath(ip) + imPath, err = path.NewImmutablePath(ip) if err != nil { - return nil, err + return path.ImmutablePath{}, err } } md, err := mb.ResolvePath(ctx, imPath) if err != nil { - return nil, err + return path.ImmutablePath{}, err } return md.LastSegment, nil } diff --git a/go.mod b/go.mod index d10e96e0a..157f4d280 100644 --- a/go.mod +++ b/go.mod @@ -24,7 +24,6 @@ require ( github.com/ipfs/go-ipfs-blocksutil v0.0.1 github.com/ipfs/go-ipfs-delay v0.0.1 github.com/ipfs/go-ipfs-redirects-file v0.1.1 - github.com/ipfs/go-ipld-cbor v0.0.6 github.com/ipfs/go-ipld-format v0.5.0 github.com/ipfs/go-ipld-legacy v0.2.1 github.com/ipfs/go-log/v2 v2.5.1 @@ -37,21 +36,21 @@ require ( github.com/jbenet/goprocess v0.1.4 github.com/libp2p/go-buffer-pool v0.1.0 github.com/libp2p/go-doh-resolver v0.4.0 - github.com/libp2p/go-libp2p v0.30.0 + github.com/libp2p/go-libp2p v0.32.0 github.com/libp2p/go-libp2p-kad-dht v0.23.0 github.com/libp2p/go-libp2p-record v0.2.0 github.com/libp2p/go-libp2p-routing-helpers v0.7.0 github.com/libp2p/go-libp2p-testing v0.12.0 github.com/libp2p/go-msgio v0.3.0 - github.com/miekg/dns v1.1.55 + github.com/miekg/dns v1.1.56 github.com/mr-tron/base58 v1.2.0 github.com/multiformats/go-base32 v0.1.0 - github.com/multiformats/go-multiaddr v0.11.0 + github.com/multiformats/go-multiaddr v0.12.0 github.com/multiformats/go-multiaddr-dns v0.3.1 github.com/multiformats/go-multibase v0.2.0 github.com/multiformats/go-multicodec v0.9.0 github.com/multiformats/go-multihash v0.2.3 - github.com/multiformats/go-multistream v0.4.1 + github.com/multiformats/go-multistream v0.5.0 github.com/pkg/errors v0.9.1 github.com/polydawn/refmt v0.89.0 github.com/prometheus/client_golang v1.16.0 @@ -67,9 +66,9 @@ require ( go.opentelemetry.io/otel/sdk v1.19.0 go.opentelemetry.io/otel/trace v1.19.0 go.uber.org/multierr v1.11.0 - go.uber.org/zap v1.25.0 + go.uber.org/zap v1.26.0 golang.org/x/oauth2 v0.10.0 - golang.org/x/sync v0.3.0 + golang.org/x/sync v0.4.0 golang.org/x/sys v0.13.0 google.golang.org/protobuf v1.31.0 ) @@ -83,7 +82,7 @@ require ( go.opentelemetry.io/otel/exporters/zipkin v1.18.0 go.opentelemetry.io/otel/sdk/metric v1.19.0 go.uber.org/zap/exp v0.1.0 - golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 + golang.org/x/exp v0.0.0-20231006140011-7918f672742d ) require ( @@ -103,26 +102,26 @@ require ( github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect 
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/mock v1.6.0 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/google/gopacket v1.1.19 // indirect - github.com/google/pprof v0.0.0-20230821062121-407c9e7a662f // indirect + github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b // indirect github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect - github.com/huin/goupnp v1.2.0 // indirect + github.com/huin/goupnp v1.3.0 // indirect github.com/ipfs/go-ds-leveldb v0.5.0 // indirect github.com/ipfs/go-ipfs-pq v0.0.3 // indirect github.com/ipfs/go-ipfs-util v0.0.2 // indirect + github.com/ipfs/go-ipld-cbor v0.0.6 // indirect github.com/ipfs/go-log v1.0.5 // indirect github.com/ipfs/go-unixfs v0.4.5 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect - github.com/klauspost/compress v1.16.7 // indirect + github.com/klauspost/compress v1.17.2 // indirect github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/koron/go-ssdp v0.0.4 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect @@ -134,7 +133,7 @@ require ( github.com/libp2p/go-reuseport v0.4.0 // indirect github.com/libp2p/go-yamux/v4 v4.0.1 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect - github.com/mattn/go-isatty v0.0.19 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect @@ -142,7 +141,7 @@ require ( github.com/multiformats/go-base36 v0.2.0 // indirect github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect github.com/multiformats/go-varint v0.0.7 // indirect - github.com/onsi/ginkgo/v2 v2.11.0 // indirect + github.com/onsi/ginkgo/v2 v2.13.0 // indirect github.com/opencontainers/runtime-spec v1.1.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/openzipkin/zipkin-go v0.4.2 // indirect @@ -153,9 +152,9 @@ require ( github.com/prometheus/common v0.44.0 // indirect github.com/prometheus/procfs v0.11.1 // indirect github.com/quic-go/qpack v0.4.0 // indirect - github.com/quic-go/qtls-go1-20 v0.3.2 // indirect - github.com/quic-go/quic-go v0.38.0 // indirect - github.com/quic-go/webtransport-go v0.5.3 // indirect + github.com/quic-go/qtls-go1-20 v0.3.4 // indirect + github.com/quic-go/quic-go v0.39.3 // indirect + github.com/quic-go/webtransport-go v0.6.0 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect github.com/stretchr/objx v0.5.0 // indirect github.com/syndtr/goleveldb v1.0.0 // indirect @@ -166,13 +165,14 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.18.0 // indirect go.opentelemetry.io/otel/metric v1.19.0 // indirect go.opentelemetry.io/proto/otlp v1.0.0 // indirect - go.uber.org/dig v1.17.0 // indirect - go.uber.org/fx v1.20.0 // indirect + go.uber.org/dig v1.17.1 // indirect + go.uber.org/fx v1.20.1 // indirect + go.uber.org/mock v0.3.0 // indirect golang.org/x/crypto v0.14.0 // indirect - golang.org/x/mod v0.12.0 // indirect + golang.org/x/mod 
v0.13.0 // indirect golang.org/x/net v0.17.0 // indirect golang.org/x/text v0.13.0 // indirect - golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 // indirect + golang.org/x/tools v0.14.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect gonum.org/v1/gonum v0.13.0 // indirect google.golang.org/appengine v1.6.7 // indirect diff --git a/go.sum b/go.sum index e2abc4903..b18b0d287 100644 --- a/go.sum +++ b/go.sum @@ -68,7 +68,6 @@ github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJn github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= github.com/gabriel-vasile/mimetype v1.4.1 h1:TRWk7se+TOjCYgRth7+1/OYLNiRNIotknkFtf/dnN7Q= github.com/gabriel-vasile/mimetype v1.4.1/go.mod h1:05Vi0w3Y9c/lNvJOdmIwvrrAhX3rYhfQQCaf9VJcv7M= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -98,8 +97,6 @@ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4er github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -132,8 +129,8 @@ github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20230821062121-407c9e7a662f h1:pDhu5sgp8yJlEF/g6osliIIpF9K4F5jvkULXa4daRDQ= -github.com/google/pprof v0.0.0-20230821062121-407c9e7a662f/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b h1:RMpPgZTSApbPf7xaVel+QkoGPRLFLrwFO89uDUHEGf0= +github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -163,9 +160,10 @@ github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+l github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4= github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hpcloud/tail v1.0.0 
h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huin/goupnp v1.2.0 h1:uOKW26NG1hsSSbXIZ1IR7XP9Gjd1U8pnLaCMgntmkmY= -github.com/huin/goupnp v1.2.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= +github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= +github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA= @@ -236,6 +234,7 @@ github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOan github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20230102063945-1a409dc236dd h1:gMlw/MhNr2Wtp5RwGdsW23cs+yCuj9k2ON7i9MiJlRo= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jbenet/go-cienv v0.1.0 h1:Vc/s0QbQtoxX8MwwSLWWh+xNNZvM3Lw7NsTcHrvvhMc= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= @@ -250,8 +249,8 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= -github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= +github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= @@ -272,8 +271,8 @@ github.com/libp2p/go-doh-resolver v0.4.0 h1:gUBa1f1XsPwtpE1du0O+nnZCUqtG7oYi7Bb+ github.com/libp2p/go-doh-resolver v0.4.0/go.mod h1:v1/jwsFusgsWIGX/c6vCRrnJ60x7bhTiq/fs2qt0cAg= github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= -github.com/libp2p/go-libp2p v0.30.0 h1:9EZwFtJPFBcs/yJTnP90TpN1hgrT/EsFfM+OZuwV87U= -github.com/libp2p/go-libp2p v0.30.0/go.mod h1:nr2g5V7lfftwgiJ78/HrID+pwvayLyqKCEirT2Y3Byg= +github.com/libp2p/go-libp2p v0.32.0 h1:86I4B7nBUPIyTgw3+5Ibq6K7DdKRCuZw8URCfPc1hQM= +github.com/libp2p/go-libp2p v0.32.0/go.mod h1:hXXC3kXPlBZ1eu8Q2hptGrMB4mZ3048JUoS4EKaHW5c= github.com/libp2p/go-libp2p-asn-util v0.3.0 h1:gMDcMyYiZKkocGXDQ5nsUQyquC9+H+iLEQHwOCZ7s8s= github.com/libp2p/go-libp2p-asn-util v0.3.0/go.mod h1:B1mcOrKUE35Xq/ASTmQ4tN3LNzVVaMNmq2NACuqyB9w= github.com/libp2p/go-libp2p-kad-dht v0.23.0 h1:sxE6LxLopp79eLeV695n7+c77V/Vn4AMF28AdM/XFqM= @@ -301,15 +300,15 @@ github.com/mailru/easyjson 
v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo= -github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= +github.com/miekg/dns v1.1.56 h1:5imZaSeoRNvpM9SzWNhEcP9QliKiz20/dA2QabIGVnE= +github.com/miekg/dns v1.1.56/go.mod h1:cRm6Oo2C8TY9ZS/TqsSrseAcncm74lfK5G+ikN2SWWY= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= @@ -336,8 +335,8 @@ github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9 github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= -github.com/multiformats/go-multiaddr v0.11.0 h1:XqGyJ8ufbCE0HmTDwx2kPdsrQ36AGPZNZX6s6xfJH10= -github.com/multiformats/go-multiaddr v0.11.0/go.mod h1:gWUm0QLR4thQ6+ZF6SXUw8YjtwQSPapICM+NmCkxHSM= +github.com/multiformats/go-multiaddr v0.12.0 h1:1QlibTFkoXJuDjjYsMHhE73TnzJQl8FSWatk/0gxGzE= +github.com/multiformats/go-multiaddr v0.12.0/go.mod h1:WmZXgObOQOYp9r3cslLlppkrz1FYSHmE834dfz/lWu8= github.com/multiformats/go-multiaddr-dns v0.3.0/go.mod h1:mNzQ4eTGDg0ll1N9jKPOUogZPoJ30W8a7zk66FQPpdQ= github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= @@ -355,20 +354,19 @@ github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpK github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= -github.com/multiformats/go-multistream v0.4.1 h1:rFy0Iiyn3YT0asivDUIR05leAdwZq3de4741sbiSdfo= -github.com/multiformats/go-multistream v0.4.1/go.mod 
h1:Mz5eykRVAjJWckE2U78c6xqdtyNUEhKSM0Lwar2p77Q= +github.com/multiformats/go-multistream v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf/QGkvOGQAyiE= +github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA= github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= -github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -410,12 +408,12 @@ github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwa github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= -github.com/quic-go/qtls-go1-20 v0.3.2 h1:rRgN3WfnKbyik4dBV8A6girlJVxGand/d+jVKbQq5GI= -github.com/quic-go/qtls-go1-20 v0.3.2/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k= -github.com/quic-go/quic-go v0.38.0 h1:T45lASr5q/TrVwt+jrVccmqHhPL2XuSyoCLVCpfOSLc= -github.com/quic-go/quic-go v0.38.0/go.mod h1:MPCuRq7KBK2hNcfKj/1iD1BGuN3eAYMeNxp3T42LRUg= -github.com/quic-go/webtransport-go v0.5.3 h1:5XMlzemqB4qmOlgIus5zB45AcZ2kCgCy2EptUrfOPWU= -github.com/quic-go/webtransport-go v0.5.3/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= +github.com/quic-go/qtls-go1-20 v0.3.4 h1:MfFAPULvst4yoMgY9QmtpYmfij/em7O8UUi+bNVm7Cg= +github.com/quic-go/qtls-go1-20 v0.3.4/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k= +github.com/quic-go/quic-go v0.39.3 h1:o3YB6t2SR+HU/pgwF29kJ6g4jJIJEwEZ8CKia1h1TKg= +github.com/quic-go/quic-go v0.39.3/go.mod h1:T09QsDQWjLiQ74ZmacDfqZmhY/NLnw5BC40MANNNZ1Q= +github.com/quic-go/webtransport-go v0.6.0 h1:CvNsKqc4W2HljHJnoT+rMmbRJybShZ0YPFDD3NxaZLY= +github.com/quic-go/webtransport-go v0.6.0/go.mod h1:9KjU4AEBqEQidGHNDkZrb8CAa1abRaosM2yGOyiikEc= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= github.com/rogpeppe/go-internal v1.3.0/go.mod 
h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -534,12 +532,14 @@ go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v8 go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= -go.uber.org/dig v1.17.0 h1:5Chju+tUvcC+N7N6EV08BJz41UZuO3BmHcN4A287ZLI= -go.uber.org/dig v1.17.0/go.mod h1:rTxpf7l5I0eBTlE6/9RL+lDybC7WFwY2QH55ZSjy1mU= -go.uber.org/fx v1.20.0 h1:ZMC/pnRvhsthOZh9MZjMq5U8Or3mA9zBSPaLnzs3ihQ= -go.uber.org/fx v1.20.0/go.mod h1:qCUj0btiR3/JnanEr1TYEePfSw6o/4qYJscgvzQ5Ub0= +go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= +go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.20.1 h1:zVwVQGS8zYvhh9Xxcu4w1M6ESyeMzebzj2NbSayZ4Mk= +go.uber.org/fx v1.20.1/go.mod h1:iSYNbHf2y55acNCwCXKx7LbWb5WG1Bnue5RDXz1OREg= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/mock v0.3.0 h1:3mUxI1No2/60yUYax92Pt8eNOEecx2D3lcXZh2NEZJo= +go.uber.org/mock v0.3.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -547,8 +547,8 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8 go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= -go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c= -go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= go.uber.org/zap/exp v0.1.0 h1:Ol9zQNvAEAgFHSBiR5LlwS9Xq8u5QF+7HBwNHUB8rcI= go.uber.org/zap/exp v0.1.0/go.mod h1:z/0T3As39ttolxZGOsvk1OEvQfwwfTZpmV9YTp+VAkc= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= @@ -566,8 +566,8 @@ golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ= -golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint 
v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -579,8 +579,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= -golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= +golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -617,8 +617,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -669,10 +669,9 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 h1:Vve/L0v7CXXuxUmaMGIEK/dEeq7uiqb5qBgQrZzIE7E= -golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= +golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= +golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -731,6 +730,7 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 
v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= diff --git a/ipns/README.md b/ipns/README.md index 502f6dded..28989e0c9 100644 --- a/ipns/README.md +++ b/ipns/README.md @@ -30,7 +30,10 @@ func main() { } // Define the path this record will point to. - path := path.FromString("/ipfs/bafkqac3jobxhgidsn5rww4yk") + path, err := path.NewPath("/ipfs/bafkqac3jobxhgidsn5rww4yk") + if err != nil { + panic(err) + } // Until when the record is valid. eol := time.Now().Add(time.Hour) diff --git a/ipns/defaults.go b/ipns/defaults.go new file mode 100644 index 000000000..c2179e61e --- /dev/null +++ b/ipns/defaults.go @@ -0,0 +1,17 @@ +package ipns + +import "time" + +const ( + + // DefaultRecordLifetime defines for how long IPNS record should be valid + // when ValidityType is 0. The default here aims to match the record + // expiration window of Amino DHT. + DefaultRecordLifetime = 48 * time.Hour + + // DefaultRecordTTL specifies how long the record can be returned from + // cache before checking for update again. The function of this TTL is + // similar to TTL of DNS record, and the default here is a trade-off + // between faster updates and benefiting from various types of caching. + DefaultRecordTTL = 1 * time.Hour +) diff --git a/ipns/name.go b/ipns/name.go index 2a6bbdbf4..e76a3ae33 100644 --- a/ipns/name.go +++ b/ipns/name.go @@ -6,6 +6,7 @@ import ( "fmt" "strings" + "github.com/ipfs/boxo/path" "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p/core/peer" mb "github.com/multiformats/go-multibase" @@ -24,7 +25,7 @@ const ( // [Multihash]: https://multiformats.io/multihash/ // [IPNS Name]: https://specs.ipfs.tech/ipns/ipns-record/#ipns-name type Name struct { - src []byte + multihash string // binary Multihash without multibase envelope } // NameFromString creates a [Name] from the given IPNS Name in its [string representation]. @@ -56,7 +57,7 @@ func NameFromRoutingKey(data []byte) (Name, error) { // NameFromPeer creates a [Name] from the given [peer.ID]. func NameFromPeer(pid peer.ID) Name { - return Name{src: []byte(pid)} + return Name{multihash: string(pid)} } // NameFromCid creates a [Name] from the given [cid.Cid]. @@ -65,7 +66,7 @@ func NameFromCid(c cid.Cid) (Name, error) { if code != mc.Libp2pKey { return Name{}, fmt.Errorf("CID codec %q is not allowed for IPNS Names, use %q instead", code, mc.Libp2pKey) } - return Name{src: c.Hash()}, nil + return Name{multihash: string(c.Hash())}, nil } // RoutingKey returns the binary IPNS Routing Key for the given [Name]. Note that @@ -76,14 +77,14 @@ func NameFromCid(c cid.Cid) (Name, error) { func (n Name) RoutingKey() []byte { var buffer bytes.Buffer buffer.WriteString(NamespacePrefix) - buffer.Write(n.src) // Note: we append raw multihash bytes (no multibase) + buffer.WriteString(n.multihash) // Note: we append raw multihash bytes (no multibase) return buffer.Bytes() } // Cid returns [Name] encoded as a [cid.Cid] of the public key. 
If the IPNS Name // is invalid (e.g., empty), this will return the empty Cid. func (n Name) Cid() cid.Cid { - m, err := mh.Cast([]byte(n.src)) + m, err := mh.Cast([]byte(n.multihash)) if err != nil { return cid.Undef } @@ -92,7 +93,7 @@ func (n Name) Cid() cid.Cid { // Peer returns [Name] as a [peer.ID]. func (n Name) Peer() peer.ID { - return peer.ID(n.src) + return peer.ID(n.multihash) } // String returns the human-readable IPNS Name, encoded as a CIDv1 with libp2p-key @@ -131,5 +132,14 @@ func (n Name) MarshalJSON() ([]byte, error) { // Equal returns whether the records are equal. func (n Name) Equal(other Name) bool { - return bytes.Equal(n.src, other.src) + return n.multihash == other.multihash +} + +// AsPath returns the IPNS Name as a [path.Path] prefixed by [path.IPNSNamespace]. +func (n Name) AsPath() path.Path { + p, err := path.NewPathFromSegments(path.IPNSNamespace, n.String()) + if err != nil { + panic(fmt.Errorf("path.NewPathFromSegments was called with invalid parameters: %w", err)) + } + return p } diff --git a/ipns/name_test.go b/ipns/name_test.go index 4b8ccd414..c3c31878c 100644 --- a/ipns/name_test.go +++ b/ipns/name_test.go @@ -34,6 +34,16 @@ func TestName(t *testing.T) { }) } + testPath := func(t *testing.T, name, input, expected string) { + t.Run("AsPath method: "+name, func(t *testing.T) { + t.Parallel() + + name, err := NameFromString(input) + require.NoError(t, err) + require.Equal(t, expected, name.AsPath().String()) + }) + } + testMarshalJSON := func(t *testing.T, name, input, expected string) { t.Run("Marshal JSON: "+name, func(t *testing.T) { t.Parallel() @@ -66,6 +76,7 @@ func TestName(t *testing.T) { testFromCid(t, v[0], v[2], v[2]) testString(t, v[0], v[1], v[2]) testString(t, v[0], NamespacePrefix+v[1], v[2]) + testPath(t, v[0], v[1], NamespacePrefix+v[2]) testMarshalJSON(t, v[0], v[1], `"`+v[2]+`"`) testMarshalJSON(t, v[0], NamespacePrefix+v[1], `"`+v[2]+`"`) testUnmarshalJSON(t, v[0], []byte(`"`+v[2]+`"`), v[2]) diff --git a/ipns/record.go b/ipns/record.go index cdb901af0..ddb57ac42 100644 --- a/ipns/record.go +++ b/ipns/record.go @@ -85,12 +85,12 @@ func MarshalRecord(rec *Record) ([]byte, error) { func (rec *Record) Value() (path.Path, error) { value, err := rec.getBytesValue(cborValueKey) if err != nil { - return "", err + return nil, err } - p := path.FromString(string(value)) - if err := p.IsValid(); err != nil { - return "", multierr.Combine(ErrInvalidPath, err) + p, err := path.NewPath(string(value)) + if err != nil { + return nil, multierr.Combine(ErrInvalidPath, err) } return p, nil @@ -259,7 +259,7 @@ func NewRecord(sk ic.PrivKey, value path.Path, seq uint64, eol time.Time, ttl ti } if options.v1Compatibility { - pb.Value = []byte(value) + pb.Value = []byte(value.String()) typ := ipns_pb.IpnsRecord_EOL pb.ValidityType = &typ pb.Sequence = &seq @@ -306,7 +306,7 @@ func createNode(value path.Path, seq uint64, eol time.Time, ttl time.Duration) ( m := make(map[string]ipld.Node) var keys []string - m[cborValueKey] = basicnode.NewBytes([]byte(value)) + m[cborValueKey] = basicnode.NewBytes([]byte(value.String())) keys = append(keys, cborValueKey) m[cborValidityKey] = basicnode.NewBytes([]byte(util.FormatRFC3339(eol))) diff --git a/ipns/record_test.go b/ipns/record_test.go index d761ecfc5..db92a6a17 100644 --- a/ipns/record_test.go +++ b/ipns/record_test.go @@ -17,10 +17,18 @@ import ( "google.golang.org/protobuf/proto" ) -const ( - testPath = path.Path("/ipfs/bafkqac3jobxhgidsn5rww4yk") +var ( + testPath path.Path ) +func init() { + var err error + 
testPath, err = path.NewPath("/ipfs/bafkqac3jobxhgidsn5rww4yk") + if err != nil { + panic(err) + } +} + func mustKeyPair(t *testing.T, typ int) (ic.PrivKey, ic.PubKey, Name) { sr := util.NewTimeSeededRand() sk, pk, err := ic.GenerateKeyPairWithReader(typ, 2048, sr) @@ -195,14 +203,15 @@ func TestCBORDataSerialization(t *testing.T) { sk, _, _ := mustKeyPair(t, ic.Ed25519) eol := time.Now().Add(time.Hour) - path := path.FromString(string(append([]byte("/path/1"), 0x00))) + path, err := path.Join(testPath, string([]byte{0x00})) + require.NoError(t, err) seq := uint64(1) ttl := time.Hour rec := mustNewRecord(t, sk, path, seq, eol, ttl) builder := basicnode.Prototype__Map{}.NewBuilder() - err := dagcbor.Decode(builder, bytes.NewReader(rec.pb.GetData())) + err = dagcbor.Decode(builder, bytes.NewReader(rec.pb.GetData())) require.NoError(t, err) node := builder.Build() @@ -218,7 +227,7 @@ func TestCBORDataSerialization(t *testing.T) { case cborValueKey: b, err := v.AsBytes() require.NoError(t, err) - require.Equal(t, b, []byte(path)) + require.Equal(t, b, []byte(path.String())) case cborSequenceKey: s, err := v.AsInt() require.NoError(t, err) diff --git a/ipns/validation_test.go b/ipns/validation_test.go index 126c357e7..8195fde23 100644 --- a/ipns/validation_test.go +++ b/ipns/validation_test.go @@ -164,8 +164,14 @@ func TestValidate(t *testing.T) { v := Validator{} - rec1 := mustNewRecord(t, sk, path.FromString("/path/1"), 1, eol, 0, WithV1Compatibility(true)) - rec2 := mustNewRecord(t, sk, path.FromString("/path/2"), 2, eol, 0, WithV1Compatibility(true)) + path1, err := path.Join(testPath, "1") + require.NoError(t, err) + + path2, err := path.Join(testPath, "2") + require.NoError(t, err) + + rec1 := mustNewRecord(t, sk, path1, 1, eol, 0, WithV1Compatibility(true)) + rec2 := mustNewRecord(t, sk, path2, 2, eol, 0, WithV1Compatibility(true)) best, err := v.Select(ipnsRoutingKey, [][]byte{mustMarshal(t, rec1), mustMarshal(t, rec2)}) require.NoError(t, err) @@ -210,8 +216,10 @@ func TestValidate(t *testing.T) { sk, pk, _ := mustKeyPair(t, ic.RSA) // Create a record that is too large (value + other fields). 
- value := make([]byte, MaxRecordSize) - rec, err := NewRecord(sk, path.FromString(string(value)), 1, eol, 0) + path, err := path.Join(testPath, string(make([]byte, MaxRecordSize))) + require.NoError(t, err) + + rec, err := NewRecord(sk, path, 1, eol, 0) require.NoError(t, err) err = Validate(rec, pk) diff --git a/mfs/mfs_test.go b/mfs/mfs_test.go index 9ecdbffd5..2bdf82994 100644 --- a/mfs/mfs_test.go +++ b/mfs/mfs_test.go @@ -12,6 +12,7 @@ import ( "os" gopath "path" "sort" + "strings" "sync" "testing" "time" @@ -24,7 +25,6 @@ import ( ft "github.com/ipfs/boxo/ipld/unixfs" importer "github.com/ipfs/boxo/ipld/unixfs/importer" uio "github.com/ipfs/boxo/ipld/unixfs/io" - path "github.com/ipfs/boxo/path" u "github.com/ipfs/boxo/util" cid "github.com/ipfs/go-cid" @@ -59,7 +59,7 @@ func fileNodeFromReader(t *testing.T, ds ipld.DAGService, r io.Reader) ipld.Node } func mkdirP(t *testing.T, root *Directory, pth string) *Directory { - dirs := path.SplitList(pth) + dirs := strings.Split(pth, "/") cur := root for _, d := range dirs { n, err := cur.Mkdir(d) @@ -145,7 +145,7 @@ func assertFileAtPath(ds ipld.DAGService, root *Directory, expn ipld.Node, pth s return dag.ErrNotProtobuf } - parts := path.SplitList(pth) + parts := strings.Split(pth, "/") cur := root for i, d := range parts[:len(parts)-1] { next, err := cur.Child(d) diff --git a/mfs/ops.go b/mfs/ops.go index 78156dd52..b3d9f2a5e 100644 --- a/mfs/ops.go +++ b/mfs/ops.go @@ -7,8 +7,6 @@ import ( gopath "path" "strings" - path "github.com/ipfs/boxo/path" - cid "github.com/ipfs/go-cid" ipld "github.com/ipfs/go-ipld-format" ) @@ -131,7 +129,7 @@ func Mkdir(r *Root, pth string, opts MkdirOpts) error { if pth == "" { return fmt.Errorf("no path given to Mkdir") } - parts := path.SplitList(pth) + parts := strings.Split(pth, "/") if parts[0] == "" { parts = parts[1:] } @@ -167,7 +165,7 @@ func Mkdir(r *Root, pth string, opts MkdirOpts) error { next, ok := fsn.(*Directory) if !ok { - return fmt.Errorf("%s was not a directory", path.Join(parts[:i])) + return fmt.Errorf("%s was not a directory", strings.Join(parts[:i], "/")) } cur = next } @@ -205,7 +203,7 @@ func Lookup(r *Root, path string) (FSNode, error) { // under the directory 'd' func DirLookup(d *Directory, pth string) (FSNode, error) { pth = strings.Trim(pth, "/") - parts := path.SplitList(pth) + parts := strings.Split(pth, "/") if len(parts) == 1 && parts[0] == "" { return d, nil } @@ -215,7 +213,7 @@ func DirLookup(d *Directory, pth string) (FSNode, error) { for i, p := range parts { chdir, ok := cur.(*Directory) if !ok { - return nil, fmt.Errorf("cannot access %s: Not a directory", path.Join(parts[:i+1])) + return nil, fmt.Errorf("cannot access %s: Not a directory", strings.Join(parts[:i+1], "/")) } child, err := chdir.Child(p) diff --git a/namesys/base.go b/namesys/base.go deleted file mode 100644 index 06b24bedc..000000000 --- a/namesys/base.go +++ /dev/null @@ -1,126 +0,0 @@ -package namesys - -import ( - "context" - "strings" - "time" - - opts "github.com/ipfs/boxo/coreiface/options/namesys" - path "github.com/ipfs/boxo/path" -) - -type onceResult struct { - value path.Path - ttl time.Duration - err error -} - -type resolver interface { - resolveOnceAsync(ctx context.Context, name string, options opts.ResolveOpts) <-chan onceResult -} - -// resolve is a helper for implementing Resolver.ResolveN using resolveOnce. 
-func resolve(ctx context.Context, r resolver, name string, options opts.ResolveOpts) (path.Path, error) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - err := ErrResolveFailed - var p path.Path - - resCh := resolveAsync(ctx, r, name, options) - - for res := range resCh { - p, err = res.Path, res.Err - if err != nil { - break - } - } - - return p, err -} - -func resolveAsync(ctx context.Context, r resolver, name string, options opts.ResolveOpts) <-chan Result { - ctx, span := StartSpan(ctx, "ResolveAsync") - defer span.End() - - resCh := r.resolveOnceAsync(ctx, name, options) - depth := options.Depth - outCh := make(chan Result, 1) - - go func() { - defer close(outCh) - ctx, span := StartSpan(ctx, "ResolveAsync.Worker") - defer span.End() - - var subCh <-chan Result - var cancelSub context.CancelFunc - defer func() { - if cancelSub != nil { - cancelSub() - } - }() - - for { - select { - case res, ok := <-resCh: - if !ok { - resCh = nil - break - } - - if res.err != nil { - emitResult(ctx, outCh, Result{Err: res.err}) - return - } - log.Debugf("resolved %s to %s", name, res.value.String()) - if !strings.HasPrefix(res.value.String(), ipnsPrefix) { - emitResult(ctx, outCh, Result{Path: res.value}) - break - } - - if depth == 1 { - emitResult(ctx, outCh, Result{Path: res.value, Err: ErrResolveRecursion}) - break - } - - subopts := options - if subopts.Depth > 1 { - subopts.Depth-- - } - - var subCtx context.Context - if cancelSub != nil { - // Cancel previous recursive resolve since it won't be used anyways - cancelSub() - } - subCtx, cancelSub = context.WithCancel(ctx) - _ = cancelSub - - p := strings.TrimPrefix(res.value.String(), ipnsPrefix) - subCh = resolveAsync(subCtx, r, p, subopts) - case res, ok := <-subCh: - if !ok { - subCh = nil - break - } - - // We don't bother returning here in case of context timeout as there is - // no good reason to do that, and we may still be able to emit a result - emitResult(ctx, outCh, res) - case <-ctx.Done(): - return - } - if resCh == nil && subCh == nil { - return - } - } - }() - return outCh -} - -func emitResult(ctx context.Context, outCh chan<- Result, r Result) { - select { - case outCh <- r: - case <-ctx.Done(): - } -} diff --git a/namesys/cache.go b/namesys/cache.go deleted file mode 100644 index 8b7f50794..000000000 --- a/namesys/cache.go +++ /dev/null @@ -1,62 +0,0 @@ -package namesys - -import ( - "time" - - path "github.com/ipfs/boxo/path" -) - -func (ns *mpns) cacheGet(name string) (path.Path, bool) { - // existence of optional mapping defined via IPFS_NS_MAP is checked first - if ns.staticMap != nil { - val, ok := ns.staticMap[name] - if ok { - return val, true - } - } - - if ns.cache == nil { - return "", false - } - - ientry, ok := ns.cache.Get(name) - if !ok { - return "", false - } - - entry, ok := ientry.(cacheEntry) - if !ok { - // should never happen, purely for sanity - log.Panicf("unexpected type %T in cache for %q.", ientry, name) - } - - if time.Now().Before(entry.eol) { - return entry.val, true - } - - ns.cache.Remove(name) - - return "", false -} - -func (ns *mpns) cacheSet(name string, val path.Path, ttl time.Duration) { - if ns.cache == nil || ttl <= 0 { - return - } - ns.cache.Add(name, cacheEntry{ - val: val, - eol: time.Now().Add(ttl), - }) -} - -func (ns *mpns) cacheInvalidate(name string) { - if ns.cache == nil { - return - } - ns.cache.Remove(name) -} - -type cacheEntry struct { - val path.Path - eol time.Time -} diff --git a/namesys/dns.go b/namesys/dns.go deleted file mode 100644 index 
6f846fcda..000000000 --- a/namesys/dns.go +++ /dev/null @@ -1,195 +0,0 @@ -package namesys - -import ( - "context" - "errors" - "fmt" - "net" - gpath "path" - "strings" - - opts "github.com/ipfs/boxo/coreiface/options/namesys" - path "github.com/ipfs/boxo/path" - dns "github.com/miekg/dns" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -// LookupTXTFunc is a function that lookups TXT record values. -type LookupTXTFunc func(ctx context.Context, name string) (txt []string, err error) - -// DNSResolver implements a Resolver on DNS domains -type DNSResolver struct { - lookupTXT LookupTXTFunc - // TODO: maybe some sort of caching? - // cache would need a timeout -} - -// NewDNSResolver constructs a name resolver using DNS TXT records. -func NewDNSResolver(lookup LookupTXTFunc) *DNSResolver { - return &DNSResolver{lookupTXT: lookup} -} - -// Resolve implements Resolver. -func (r *DNSResolver) Resolve(ctx context.Context, name string, options ...opts.ResolveOpt) (path.Path, error) { - ctx, span := StartSpan(ctx, "DNSResolver.Resolve") - defer span.End() - - return resolve(ctx, r, name, opts.ProcessOpts(options)) -} - -// ResolveAsync implements Resolver. -func (r *DNSResolver) ResolveAsync(ctx context.Context, name string, options ...opts.ResolveOpt) <-chan Result { - ctx, span := StartSpan(ctx, "DNSResolver.ResolveAsync") - defer span.End() - - return resolveAsync(ctx, r, name, opts.ProcessOpts(options)) -} - -type lookupRes struct { - path path.Path - error error -} - -// resolveOnce implements resolver. -// TXT records for a given domain name should contain a b58 -// encoded multihash. -func (r *DNSResolver) resolveOnceAsync(ctx context.Context, name string, options opts.ResolveOpts) <-chan onceResult { - ctx, span := StartSpan(ctx, "DNSResolver.ResolveOnceAsync") - defer span.End() - - var fqdn string - out := make(chan onceResult, 1) - segments := strings.SplitN(name, "/", 2) - domain := segments[0] - - if _, ok := dns.IsDomainName(domain); !ok { - out <- onceResult{err: fmt.Errorf("not a valid domain name: %s", domain)} - close(out) - return out - } - log.Debugf("DNSResolver resolving %s", domain) - - if strings.HasSuffix(domain, ".") { - fqdn = domain - } else { - fqdn = domain + "." - } - - rootChan := make(chan lookupRes, 1) - go workDomain(ctx, r, fqdn, rootChan) - - subChan := make(chan lookupRes, 1) - go workDomain(ctx, r, "_dnslink."+fqdn, subChan) - - appendPath := func(p path.Path) (path.Path, error) { - if len(segments) > 1 { - return path.FromSegments("", strings.TrimRight(p.String(), "/"), segments[1]) - } - return p, nil - } - - go func() { - defer close(out) - ctx, span := StartSpan(ctx, "DNSResolver.ResolveOnceAsync.Worker") - defer span.End() - - var rootResErr, subResErr error - for { - select { - case subRes, ok := <-subChan: - if !ok { - subChan = nil - break - } - if subRes.error == nil { - p, err := appendPath(subRes.path) - emitOnceResult(ctx, out, onceResult{value: p, err: err}) - // Return without waiting for rootRes, since this result - // (for "_dnslink."+fqdn) takes precedence - return - } - subResErr = subRes.error - case rootRes, ok := <-rootChan: - if !ok { - rootChan = nil - break - } - if rootRes.error == nil { - p, err := appendPath(rootRes.path) - emitOnceResult(ctx, out, onceResult{value: p, err: err}) - // Do not return here. Wait for subRes so that it is - // output last if good, thereby giving subRes precedence. 
- } else { - rootResErr = rootRes.error - } - case <-ctx.Done(): - return - } - if subChan == nil && rootChan == nil { - // If here, then both lookups are done - // - // If both lookups failed due to no TXT records with a - // dnslink, then output a more specific error message - if rootResErr == ErrResolveFailed && subResErr == ErrResolveFailed { - // Wrap error so that it can be tested if it is a ErrResolveFailed - err := fmt.Errorf("%w: _dnslink subdomain at %q is missing a TXT record (https://docs.ipfs.tech/concepts/dnslink/)", ErrResolveFailed, gpath.Base(name)) - emitOnceResult(ctx, out, onceResult{err: err}) - } - return - } - } - }() - - return out -} - -func workDomain(ctx context.Context, r *DNSResolver, name string, res chan lookupRes) { - ctx, span := StartSpan(ctx, "DNSResolver.WorkDomain", trace.WithAttributes(attribute.String("Name", name))) - defer span.End() - - defer close(res) - - txt, err := r.lookupTXT(ctx, name) - if err != nil { - if dnsErr, ok := err.(*net.DNSError); ok { - // If no TXT records found, return same error as when no text - // records contain dnslink. Otherwise, return the actual error. - if dnsErr.IsNotFound { - err = ErrResolveFailed - } - } - // Could not look up any text records for name - res <- lookupRes{"", err} - return - } - - for _, t := range txt { - p, err := parseEntry(t) - if err == nil { - res <- lookupRes{p, nil} - return - } - } - - // There were no TXT records with a dnslink - res <- lookupRes{"", ErrResolveFailed} -} - -func parseEntry(txt string) (path.Path, error) { - p, err := path.ParseCidToPath(txt) // bare IPFS multihashes - if err == nil { - return p, nil - } - - return tryParseDNSLink(txt) -} - -func tryParseDNSLink(txt string) (path.Path, error) { - parts := strings.SplitN(txt, "=", 2) - if len(parts) == 2 && parts[0] == "dnslink" { - return path.ParsePath(parts[1]) - } - - return "", errors.New("not a valid dnslink entry") -} diff --git a/namesys/dns_resolver.go b/namesys/dns_resolver.go new file mode 100644 index 000000000..867b1b574 --- /dev/null +++ b/namesys/dns_resolver.go @@ -0,0 +1,182 @@ +package namesys + +import ( + "context" + "errors" + "fmt" + "net" + gopath "path" + "strings" + "time" + + path "github.com/ipfs/boxo/path" + "github.com/ipfs/go-cid" + dns "github.com/miekg/dns" + "github.com/samber/lo" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// LookupTXTFunc is a function that lookups TXT record values. +type LookupTXTFunc func(ctx context.Context, name string) (txt []string, err error) + +// DNSResolver implements [Resolver] on DNS domains. +type DNSResolver struct { + lookupTXT LookupTXTFunc +} + +var _ Resolver = &DNSResolver{} + +// NewDNSResolver constructs a name resolver using DNS TXT records. 
+func NewDNSResolver(lookup LookupTXTFunc) *DNSResolver { + return &DNSResolver{lookupTXT: lookup} +} + +func (r *DNSResolver) Resolve(ctx context.Context, p path.Path, options ...ResolveOption) (Result, error) { + ctx, span := startSpan(ctx, "DNSResolver.Resolve", trace.WithAttributes(attribute.Stringer("Path", p))) + defer span.End() + + return resolve(ctx, r, p, ProcessResolveOptions(options)) +} + +func (r *DNSResolver) ResolveAsync(ctx context.Context, p path.Path, options ...ResolveOption) <-chan AsyncResult { + ctx, span := startSpan(ctx, "DNSResolver.ResolveAsync", trace.WithAttributes(attribute.Stringer("Path", p))) + defer span.End() + + return resolveAsync(ctx, r, p, ProcessResolveOptions(options)) +} + +func (r *DNSResolver) resolveOnceAsync(ctx context.Context, p path.Path, options ResolveOptions) <-chan AsyncResult { + ctx, span := startSpan(ctx, "DNSResolver.ResolveOnceAsync", trace.WithAttributes(attribute.Stringer("Path", p))) + defer span.End() + + out := make(chan AsyncResult, 1) + if p.Namespace() != path.IPNSNamespace { + out <- AsyncResult{Err: fmt.Errorf("unsupported namespace: %q", p.Namespace())} + close(out) + return out + } + + fqdn := p.Segments()[1] + if _, ok := dns.IsDomainName(fqdn); !ok { + out <- AsyncResult{Err: fmt.Errorf("not a valid domain name: %q", fqdn)} + close(out) + return out + } + + log.Debugf("DNSResolver resolving %q", fqdn) + + if !strings.HasSuffix(fqdn, ".") { + fqdn += "." + } + + resChan := make(chan AsyncResult, 1) + go workDomain(ctx, r, "_dnslink."+fqdn, resChan) + + go func() { + defer close(out) + ctx, span := startSpan(ctx, "DNSResolver.ResolveOnceAsync.Worker") + defer span.End() + + select { + case subRes, ok := <-resChan: + if !ok { + break + } + if subRes.Err == nil { + p, err := joinPaths(subRes.Path, p) + emitOnceResult(ctx, out, AsyncResult{Path: p, LastMod: time.Now(), Err: err}) + // Return without waiting for rootRes, since this result + // (for "_dnslink."+fqdn) takes precedence + } else { + err := fmt.Errorf("DNSLink lookup for %q failed: %w", gopath.Base(fqdn), subRes.Err) + emitOnceResult(ctx, out, AsyncResult{Err: err}) + } + return + case <-ctx.Done(): + return + } + }() + + return out +} + +func workDomain(ctx context.Context, r *DNSResolver, name string, res chan AsyncResult) { + ctx, span := startSpan(ctx, "DNSResolver.WorkDomain", trace.WithAttributes(attribute.String("Name", name))) + defer span.End() + + defer close(res) + + txt, err := r.lookupTXT(ctx, name) + if err != nil { + var dnsErr *net.DNSError + if errors.As(err, &dnsErr) { + // If no TXT records found, return same error as when no text + // records contain dnslink. Otherwise, return the actual error. + if dnsErr.IsNotFound { + err = ErrMissingDNSLinkRecord + } + } + // Could not look up any text records for name + res <- AsyncResult{Err: err} + return + } + + // Convert all the found TXT records into paths. Ignore invalid ones. + var paths []path.Path + for _, t := range txt { + p, err := parseEntry(t) + if err == nil { + paths = append(paths, p) + } + } + + // Filter only the IPFS and IPNS paths. + paths = lo.Filter(paths, func(item path.Path, index int) bool { + return item.Namespace() == path.IPFSNamespace || + item.Namespace() == path.IPNSNamespace + }) + + switch len(paths) { + case 0: + // There were no TXT records with a dnslink + res <- AsyncResult{Err: ErrMissingDNSLinkRecord} + case 1: + // Found 1 valid! Return it. + res <- AsyncResult{Path: paths[0]} + default: + // Found more than 1 IPFS/IPNS path. 
+ res <- AsyncResult{Err: ErrMultipleDNSLinkRecords} + } +} + +func parseEntry(txt string) (path.Path, error) { + p, err := path.NewPath(txt) // bare IPFS multihashes + if err == nil { + return p, nil + } + + // Support legacy DNSLink entries composed by the CID only. + if cid, err := cid.Decode(txt); err == nil { + return path.FromCid(cid), nil + } + + return tryParseDNSLink(txt) +} + +func tryParseDNSLink(txt string) (path.Path, error) { + parts := strings.SplitN(txt, "=", 2) + if len(parts) == 2 && parts[0] == "dnslink" { + p, err := path.NewPath(parts[1]) + if err == nil { + return p, nil + } + + // Support legacy DNSLink entries composed by "dnslink={CID}". + if cid, err := cid.Decode(parts[1]); err == nil { + return path.FromCid(cid), nil + } + } + + return nil, errors.New("not a valid dnslink entry") +} diff --git a/namesys/dns_resolver_test.go b/namesys/dns_resolver_test.go new file mode 100644 index 000000000..c174a590a --- /dev/null +++ b/namesys/dns_resolver_test.go @@ -0,0 +1,208 @@ +package namesys + +import ( + "context" + "net" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDNSParseEntry(t *testing.T) { + t.Parallel() + + t.Run("Valid entries", func(t *testing.T) { + t.Parallel() + + for _, entry := range []string{ + "QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", + "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", + "dnslink=/ipns/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", + "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/foo", + "dnslink=/ipns/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/bar", + "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/foo/bar/baz", + "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/foo/bar/baz/", + "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", + } { + _, err := parseEntry(entry) + assert.NoError(t, err) + } + }) + + t.Run("Invalid entries", func(t *testing.T) { + t.Parallel() + + for _, entry := range []string{ + "QmYhE8xgFCjGcz6PHgnvJz5NOTCORRECT", + "quux=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", + "dnslink=", + "dnslink=/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/foo", + "dnslink=ipns/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/bar", + } { + _, err := parseEntry(entry) + assert.Error(t, err) + } + }) +} + +type mockDNS struct { + entries map[string][]string +} + +func (m *mockDNS) lookupTXT(ctx context.Context, name string) (txt []string, err error) { + txt, ok := m.entries[name] + if !ok { + return nil, &net.DNSError{IsNotFound: true} + } + return txt, nil +} + +func newMockDNS() *mockDNS { + return &mockDNS{ + entries: map[string][]string{ + "_dnslink.multihash.example.com.": { + "dnslink=QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", + }, + "_dnslink.ipfs.example.com.": { + "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", + }, + "_dnslink.dipfs.example.com.": { + "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", + }, + "_dnslink.dns1.example.com.": { + "dnslink=/ipns/ipfs.example.com", + }, + "_dnslink.dns2.example.com.": { + "dnslink=/ipns/dns1.example.com", + }, + "_dnslink.multi.example.com.": { + "some stuff", + "dnslink=/ipns/dns1.example.com", + "masked dnslink=/ipns/example.invalid", + }, + "_dnslink.multi-invalid.example.com.": { + "some stuff", + "dnslink=/ipns/dns1.example.com", // we must error when >1 value with /ipns or /ipfs exists + "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", + "broken dnslink=/ipns/example.invalid", + }, + 
"_dnslink.multi-valid.example.com.": { + "some stuff", + "dnslink=/foo/bar", // duplicate dnslink= is fine as long it is not /ipfs or /ipns, which must be unique + "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", + "broken dnslink=/ipns/example.invalid", + }, + "_dnslink.equals.example.com.": { + "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/=equals", + }, + "_dnslink.loop1.example.com.": { + "dnslink=/ipns/loop2.example.com", + }, + "_dnslink.loop2.example.com.": { + "dnslink=/ipns/loop1.example.com", + }, + "_dnslink.dloop1.example.com.": { + "dnslink=/ipns/loop2.example.com", + }, + "_dnslink.dloop2.example.com.": { + "dnslink=/ipns/loop1.example.com", + }, + "_dnslink.bad.example.com.": { + "dnslink=", + }, + "_dnslink.withsegment.example.com.": { + "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/sub/segment", + }, + "_dnslink.withrecsegment.example.com.": { + "dnslink=/ipns/withsegment.example.com/subsub", + }, + "_dnslink.withtrailing.example.com.": { + "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/sub/", + }, + "_dnslink.withtrailingrec.example.com.": { + "dnslink=/ipns/withtrailing.example.com/segment/", + }, + "_dnslink.double.example.com.": { + "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", + }, + "_dnslink.double.conflict.com.": { + "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", + }, + "_dnslink.conflict.example.com.": { + "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjE", + }, + "_dnslink.fqdn.example.com.": { + "dnslink=/ipfs/QmYvMB9yrsSf7RKBghkfwmHJkzJhW2ZgVwq3LxBXXPasFr", + }, + "_dnslink.en.wikipedia-on-ipfs.org.": { + "dnslink=/ipfs/bafybeiaysi4s6lnjev27ln5icwm6tueaw2vdykrtjkwiphwekaywqhcjze", + }, + "_dnslink.custom.non-icann.tldextravaganza.": { + "dnslink=/ipfs/bafybeieto6mcuvqlechv4iadoqvnffondeiwxc2bcfcewhvpsd2odvbmvm", + }, + "_dnslink.singlednslabelshouldbeok.": { + "dnslink=/ipfs/bafybeih4a6ylafdki6ailjrdvmr7o4fbbeceeeuty4v3qyyouiz5koqlpi", + }, + "_dnslink.www.wealdtech.eth.": { + "dnslink=/ipns/ipfs.example.com", + }, + }, + } +} + +func TestDNSResolution(t *testing.T) { + t.Parallel() + r := &DNSResolver{lookupTXT: newMockDNS().lookupTXT} + + for _, testCase := range []struct { + name string + depth uint + expectedPath string + expectedError error + }{ + {"/ipns/multihash.example.com", DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", nil}, + {"/ipns/ipfs.example.com", DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", nil}, + {"/ipns/dipfs.example.com", DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", nil}, + {"/ipns/dns1.example.com", DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", nil}, + {"/ipns/dns1.example.com", 1, "/ipns/ipfs.example.com", ErrResolveRecursion}, + {"/ipns/dns2.example.com", DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", nil}, + {"/ipns/dns2.example.com", 1, "/ipns/dns1.example.com", ErrResolveRecursion}, + {"/ipns/dns2.example.com", 2, "/ipns/ipfs.example.com", ErrResolveRecursion}, + {"/ipns/multi.example.com", DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", nil}, + {"/ipns/multi.example.com", 1, "/ipns/dns1.example.com", ErrResolveRecursion}, + {"/ipns/multi.example.com", 2, "/ipns/ipfs.example.com", ErrResolveRecursion}, + {"/ipns/multi-invalid.example.com", 2, "", ErrMultipleDNSLinkRecords}, + {"/ipns/multi-valid.example.com", DefaultDepthLimit, 
"/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", nil}, + {"/ipns/equals.example.com", DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/=equals", nil}, + {"/ipns/loop1.example.com", 1, "/ipns/loop2.example.com", ErrResolveRecursion}, + {"/ipns/loop1.example.com", 2, "/ipns/loop1.example.com", ErrResolveRecursion}, + {"/ipns/loop1.example.com", 3, "/ipns/loop2.example.com", ErrResolveRecursion}, + {"/ipns/loop1.example.com", DefaultDepthLimit, "/ipns/loop1.example.com", ErrResolveRecursion}, + {"/ipns/dloop1.example.com", 1, "/ipns/loop2.example.com", ErrResolveRecursion}, + {"/ipns/dloop1.example.com", 2, "/ipns/loop1.example.com", ErrResolveRecursion}, + {"/ipns/dloop1.example.com", 3, "/ipns/loop2.example.com", ErrResolveRecursion}, + {"/ipns/dloop1.example.com", DefaultDepthLimit, "/ipns/loop1.example.com", ErrResolveRecursion}, + {"/ipns/bad.example.com", DefaultDepthLimit, "", ErrResolveFailed}, + {"/ipns/bad.example.com", DefaultDepthLimit, "", ErrResolveFailed}, + {"/ipns/withsegment.example.com", DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/sub/segment", nil}, + {"/ipns/withrecsegment.example.com", DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/sub/segment/subsub", nil}, + {"/ipns/withsegment.example.com/test1", DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/sub/segment/test1", nil}, + {"/ipns/withrecsegment.example.com/test2", DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/sub/segment/subsub/test2", nil}, + {"/ipns/withrecsegment.example.com/test3/", DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/sub/segment/subsub/test3/", nil}, + {"/ipns/withtrailingrec.example.com", DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/sub/segment/", nil}, + {"/ipns/double.example.com", DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", nil}, + {"/ipns/conflict.example.com", DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjE", nil}, + {"/ipns/fqdn.example.com.", DefaultDepthLimit, "/ipfs/QmYvMB9yrsSf7RKBghkfwmHJkzJhW2ZgVwq3LxBXXPasFr", nil}, + {"/ipns/en.wikipedia-on-ipfs.org", 2, "/ipfs/bafybeiaysi4s6lnjev27ln5icwm6tueaw2vdykrtjkwiphwekaywqhcjze", nil}, + {"/ipns/custom.non-icann.tldextravaganza.", 2, "/ipfs/bafybeieto6mcuvqlechv4iadoqvnffondeiwxc2bcfcewhvpsd2odvbmvm", nil}, + {"/ipns/singlednslabelshouldbeok", 2, "/ipfs/bafybeih4a6ylafdki6ailjrdvmr7o4fbbeceeeuty4v3qyyouiz5koqlpi", nil}, + {"/ipns/www.wealdtech.eth", 1, "/ipns/ipfs.example.com", ErrResolveRecursion}, + {"/ipns/www.wealdtech.eth", 2, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", nil}, + {"/ipns/www.wealdtech.eth", 2, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", nil}, + {"/ipns/www.wealdtech.eth", 2, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", nil}, + } { + t.Run(testCase.name, func(t *testing.T) { + testResolution(t, r, testCase.name, (testCase.depth), testCase.expectedPath, 0, testCase.expectedError) + }) + } +} diff --git a/namesys/dns_test.go b/namesys/dns_test.go deleted file mode 100644 index a31a53582..000000000 --- a/namesys/dns_test.go +++ /dev/null @@ -1,184 +0,0 @@ -package namesys - -import ( - "context" - "fmt" - "testing" - - opts "github.com/ipfs/boxo/coreiface/options/namesys" -) - -type mockDNS struct { - entries map[string][]string -} - -func (m *mockDNS) lookupTXT(ctx context.Context, name string) (txt []string, err error) { - txt, ok := m.entries[name] - 
if !ok { - return nil, fmt.Errorf("no TXT entry for %s", name) - } - return txt, nil -} - -func TestDnsEntryParsing(t *testing.T) { - goodEntries := []string{ - "QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", - "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", - "dnslink=/ipns/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", - "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/foo", - "dnslink=/ipns/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/bar", - "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/foo/bar/baz", - "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/foo/bar/baz/", - "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", - } - - badEntries := []string{ - "QmYhE8xgFCjGcz6PHgnvJz5NOTCORRECT", - "quux=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", - "dnslink=", - "dnslink=/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/foo", - "dnslink=ipns/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/bar", - } - - for _, e := range goodEntries { - _, err := parseEntry(e) - if err != nil { - t.Log("expected entry to parse correctly!") - t.Log(e) - t.Fatal(err) - } - } - - for _, e := range badEntries { - _, err := parseEntry(e) - if err == nil { - t.Log("expected entry parse to fail!") - t.Fatal(err) - } - } -} - -func newMockDNS() *mockDNS { - return &mockDNS{ - entries: map[string][]string{ - "multihash.example.com.": { - "dnslink=QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", - }, - "ipfs.example.com.": { - "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", - }, - "_dnslink.dipfs.example.com.": { - "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", - }, - "dns1.example.com.": { - "dnslink=/ipns/ipfs.example.com", - }, - "dns2.example.com.": { - "dnslink=/ipns/dns1.example.com", - }, - "multi.example.com.": { - "some stuff", - "dnslink=/ipns/dns1.example.com", - "masked dnslink=/ipns/example.invalid", - }, - "equals.example.com.": { - "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/=equals", - }, - "loop1.example.com.": { - "dnslink=/ipns/loop2.example.com", - }, - "loop2.example.com.": { - "dnslink=/ipns/loop1.example.com", - }, - "_dnslink.dloop1.example.com.": { - "dnslink=/ipns/loop2.example.com", - }, - "_dnslink.dloop2.example.com.": { - "dnslink=/ipns/loop1.example.com", - }, - "bad.example.com.": { - "dnslink=", - }, - "withsegment.example.com.": { - "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/sub/segment", - }, - "withrecsegment.example.com.": { - "dnslink=/ipns/withsegment.example.com/subsub", - }, - "withtrailing.example.com.": { - "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/sub/", - }, - "withtrailingrec.example.com.": { - "dnslink=/ipns/withtrailing.example.com/segment/", - }, - "double.example.com.": { - "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", - }, - "_dnslink.double.example.com.": { - "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", - }, - "double.conflict.com.": { - "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", - }, - "_dnslink.conflict.example.com.": { - "dnslink=/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjE", - }, - "fqdn.example.com.": { - "dnslink=/ipfs/QmYvMB9yrsSf7RKBghkfwmHJkzJhW2ZgVwq3LxBXXPasFr", - }, - "en.wikipedia-on-ipfs.org.": { - "dnslink=/ipfs/bafybeiaysi4s6lnjev27ln5icwm6tueaw2vdykrtjkwiphwekaywqhcjze", - }, - "custom.non-icann.tldextravaganza.": { - "dnslink=/ipfs/bafybeieto6mcuvqlechv4iadoqvnffondeiwxc2bcfcewhvpsd2odvbmvm", - }, - 
"singlednslabelshouldbeok.": { - "dnslink=/ipfs/bafybeih4a6ylafdki6ailjrdvmr7o4fbbeceeeuty4v3qyyouiz5koqlpi", - }, - "www.wealdtech.eth.": { - "dnslink=/ipns/ipfs.example.com", - }, - }, - } -} - -func TestDNSResolution(t *testing.T) { - mock := newMockDNS() - r := &DNSResolver{lookupTXT: mock.lookupTXT} - testResolution(t, r, "multihash.example.com", opts.DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", nil) - testResolution(t, r, "ipfs.example.com", opts.DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", nil) - testResolution(t, r, "dipfs.example.com", opts.DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", nil) - testResolution(t, r, "dns1.example.com", opts.DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", nil) - testResolution(t, r, "dns1.example.com", 1, "/ipns/ipfs.example.com", ErrResolveRecursion) - testResolution(t, r, "dns2.example.com", opts.DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", nil) - testResolution(t, r, "dns2.example.com", 1, "/ipns/dns1.example.com", ErrResolveRecursion) - testResolution(t, r, "dns2.example.com", 2, "/ipns/ipfs.example.com", ErrResolveRecursion) - testResolution(t, r, "multi.example.com", opts.DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", nil) - testResolution(t, r, "multi.example.com", 1, "/ipns/dns1.example.com", ErrResolveRecursion) - testResolution(t, r, "multi.example.com", 2, "/ipns/ipfs.example.com", ErrResolveRecursion) - testResolution(t, r, "equals.example.com", opts.DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/=equals", nil) - testResolution(t, r, "loop1.example.com", 1, "/ipns/loop2.example.com", ErrResolveRecursion) - testResolution(t, r, "loop1.example.com", 2, "/ipns/loop1.example.com", ErrResolveRecursion) - testResolution(t, r, "loop1.example.com", 3, "/ipns/loop2.example.com", ErrResolveRecursion) - testResolution(t, r, "loop1.example.com", opts.DefaultDepthLimit, "/ipns/loop1.example.com", ErrResolveRecursion) - testResolution(t, r, "dloop1.example.com", 1, "/ipns/loop2.example.com", ErrResolveRecursion) - testResolution(t, r, "dloop1.example.com", 2, "/ipns/loop1.example.com", ErrResolveRecursion) - testResolution(t, r, "dloop1.example.com", 3, "/ipns/loop2.example.com", ErrResolveRecursion) - testResolution(t, r, "dloop1.example.com", opts.DefaultDepthLimit, "/ipns/loop1.example.com", ErrResolveRecursion) - testResolution(t, r, "bad.example.com", opts.DefaultDepthLimit, "", ErrResolveFailed) - testResolution(t, r, "withsegment.example.com", opts.DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/sub/segment", nil) - testResolution(t, r, "withrecsegment.example.com", opts.DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/sub/segment/subsub", nil) - testResolution(t, r, "withsegment.example.com/test1", opts.DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/sub/segment/test1", nil) - testResolution(t, r, "withrecsegment.example.com/test2", opts.DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/sub/segment/subsub/test2", nil) - testResolution(t, r, "withrecsegment.example.com/test3/", opts.DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/sub/segment/subsub/test3/", nil) - testResolution(t, r, "withtrailingrec.example.com", opts.DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD/sub/segment/", nil) - testResolution(t, 
r, "double.example.com", opts.DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", nil) - testResolution(t, r, "conflict.example.com", opts.DefaultDepthLimit, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjE", nil) - testResolution(t, r, "fqdn.example.com.", opts.DefaultDepthLimit, "/ipfs/QmYvMB9yrsSf7RKBghkfwmHJkzJhW2ZgVwq3LxBXXPasFr", nil) - testResolution(t, r, "en.wikipedia-on-ipfs.org", 2, "/ipfs/bafybeiaysi4s6lnjev27ln5icwm6tueaw2vdykrtjkwiphwekaywqhcjze", nil) - testResolution(t, r, "custom.non-icann.tldextravaganza.", 2, "/ipfs/bafybeieto6mcuvqlechv4iadoqvnffondeiwxc2bcfcewhvpsd2odvbmvm", nil) - testResolution(t, r, "singlednslabelshouldbeok", 2, "/ipfs/bafybeih4a6ylafdki6ailjrdvmr7o4fbbeceeeuty4v3qyyouiz5koqlpi", nil) - testResolution(t, r, "www.wealdtech.eth", 1, "/ipns/ipfs.example.com", ErrResolveRecursion) - testResolution(t, r, "www.wealdtech.eth", 2, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", nil) - testResolution(t, r, "www.wealdtech.eth", 2, "/ipfs/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", nil) -} diff --git a/namesys/interface.go b/namesys/interface.go index 5d50936ee..1befee855 100644 --- a/namesys/interface.go +++ b/namesys/interface.go @@ -1,52 +1,57 @@ -/* -Package namesys implements resolvers and publishers for the IPFS -naming system (IPNS). - -The core of IPFS is an immutable, content-addressable Merkle graph. -That works well for many use cases, but doesn't allow you to answer -questions like "what is Alice's current homepage?". The mutable name -system allows Alice to publish information like: - - The current homepage for alice.example.com is - /ipfs/Qmcqtw8FfrVSBaRmbWwHxt3AuySBhJLcvmFYi3Lbc4xnwj - -or: - - The current homepage for node - QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy - is - /ipfs/Qmcqtw8FfrVSBaRmbWwHxt3AuySBhJLcvmFYi3Lbc4xnwj - -The mutable name system also allows users to resolve those references -to find the immutable IPFS object currently referenced by a given -mutable name. - -For command-line bindings to this functionality, see: - - ipfs name - ipfs dns - ipfs resolve -*/ package namesys import ( "context" "errors" + "fmt" + "time" - opts "github.com/ipfs/boxo/coreiface/options/namesys" + "github.com/ipfs/boxo/ipns" "github.com/ipfs/boxo/path" + logging "github.com/ipfs/go-log/v2" ci "github.com/libp2p/go-libp2p/core/crypto" ) -// ErrResolveFailed signals an error when attempting to resolve. -var ErrResolveFailed = errors.New("could not resolve name") +var log = logging.Logger("namesys") + +var ( + // ErrResolveFailed signals an error when attempting to resolve. + ErrResolveFailed = errors.New("could not resolve name") + + // ErrResolveRecursion signals a recursion-depth limit. + ErrResolveRecursion = errors.New("could not resolve name (recursion limit exceeded)") + + // ErrNoNamesys is an explicit error for when no [NameSystem] is provided. + ErrNoNamesys = errors.New("no namesys has been provided") + + // ErrMultipleDNSLinkRecords signals that the domain had multiple valid DNSLink TXT entries. + ErrMultipleDNSLinkRecords = fmt.Errorf("%w: DNSLink lookup returned more than one IPFS content path; ask domain owner to remove duplicate TXT records", ErrResolveFailed) + + // ErrMissingDNSLinkRecord signals that the domain has no DNSLink TXT entries. + ErrMissingDNSLinkRecord = fmt.Errorf("%w: DNSLink lookup could not find a TXT record (https://docs.ipfs.tech/concepts/dnslink/)", ErrResolveFailed) +) + +const ( + // DefaultDepthLimit is the default depth limit used by [Resolver]. 
+ DefaultDepthLimit = 32 -// ErrResolveRecursion signals a recursion-depth limit. -var ErrResolveRecursion = errors.New( - "could not resolve name (recursion limit exceeded)") + // UnlimitedDepth allows infinite recursion in [Resolver]. You probably don't want + // to use this, but it's here if you absolutely trust resolution to eventually + // complete and can't put an upper limit on how many steps it will take. + UnlimitedDepth = 0 -// ErrPublishFailed signals an error when attempting to publish. -var ErrPublishFailed = errors.New("could not publish name") + // DefaultResolverRecordCount is the number of IPNS Record copies to + // retrieve from the routing system like Amino DHT (the best record is + // selected from this set). + DefaultResolverDhtRecordCount = 16 + + // DefaultResolverDhtTimeout is the amount of time to wait for records to be fetched + // and verified. + DefaultResolverDhtTimeout = time.Minute + + // DefaultResolverCacheTTL defines default TTL of a record placed in [NameSystem] cache. + DefaultResolverCacheTTL = time.Minute +) // NameSystem represents a cohesive name publishing and resolving system. // @@ -60,39 +65,169 @@ type NameSystem interface { Publisher } -// Result is the return type for Resolver.ResolveAsync. +// Result is the return type for [Resolver.Resolve]. type Result struct { - Path path.Path - Err error + Path path.Path + TTL time.Duration + LastMod time.Time +} + +// AsyncResult is the return type for [Resolver.ResolveAsync]. +type AsyncResult struct { + Path path.Path + TTL time.Duration + LastMod time.Time + Err error } // Resolver is an object capable of resolving names. type Resolver interface { - // Resolve performs a recursive lookup, returning the dereferenced - // path. For example, if ipfs.io has a DNS TXT record pointing to + // Resolve performs a recursive lookup, returning the dereferenced path and the TTL. + // If the TTL is unknown, then a TTL of 0 is returned. For example, if example.com + // has a DNS TXT record pointing to: + // // /ipns/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy + // // and there is a DHT IPNS entry for + // // QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy // -> /ipfs/Qmcqtw8FfrVSBaRmbWwHxt3AuySBhJLcvmFYi3Lbc4xnwj + // // then + // // Resolve(ctx, "/ipns/ipfs.io") + // // will resolve both names, returning + // // /ipfs/Qmcqtw8FfrVSBaRmbWwHxt3AuySBhJLcvmFYi3Lbc4xnwj // - // There is a default depth-limit to avoid infinite recursion. Most - // users will be fine with this default limit, but if you need to - // adjust the limit you can specify it as an option. - Resolve(ctx context.Context, name string, options ...opts.ResolveOpt) (value path.Path, err error) + // There is a default depth-limit to avoid infinite recursion. Most users will be fine with + // this default limit, but if you need to adjust the limit you can specify it as an option. + Resolve(context.Context, path.Path, ...ResolveOption) (Result, error) + + // ResolveAsync performs recursive name lookup, like Resolve, but it returns entries as + // they are discovered in the DHT. Each returned result is guaranteed to be "better" + // (which usually means newer) than the previous one. + ResolveAsync(context.Context, path.Path, ...ResolveOption) <-chan AsyncResult +} + +// ResolveOptions specifies options for resolving an IPNS Path. +type ResolveOptions struct { + // Depth is the recursion depth limit. + Depth uint - // ResolveAsync performs recursive name lookup, like Resolve, but it returns - // entries as they are discovered in the DHT. 
Each returned result is guaranteed - // to be "better" (which usually means newer) than the previous one. - ResolveAsync(ctx context.Context, name string, options ...opts.ResolveOpt) <-chan Result + // DhtRecordCount is the number of IPNS Records to retrieve from the routing system + // (the best record is selected from this set). + DhtRecordCount uint + + // DhtTimeout is the amount of time to wait for records to be fetched and + // verified. A zero value indicates that there is no explicit timeout + // (although there may be an implicit timeout due to dial timeouts within + // the specific routing system like DHT). + DhtTimeout time.Duration +} + +// DefaultResolveOptions returns the default options for resolving an IPNS Path. +func DefaultResolveOptions() ResolveOptions { + return ResolveOptions{ + Depth: DefaultDepthLimit, + DhtRecordCount: DefaultResolverDhtRecordCount, + DhtTimeout: DefaultResolverDhtTimeout, + } +} + +// ResolveOption is used to set a resolve option. +type ResolveOption func(*ResolveOptions) + +// ResolveWithDepth sets [ResolveOptions.Depth]. +func ResolveWithDepth(depth uint) ResolveOption { + return func(o *ResolveOptions) { + o.Depth = depth + } +} + +// ResolveWithDhtRecordCount sets [ResolveOptions.DhtRecordCount]. +func ResolveWithDhtRecordCount(count uint) ResolveOption { + return func(o *ResolveOptions) { + o.DhtRecordCount = count + } +} + +// ResolveWithDhtTimeout sets [ResolveOptions.ResolveWithDhtTimeout]. +func ResolveWithDhtTimeout(timeout time.Duration) ResolveOption { + return func(o *ResolveOptions) { + o.DhtTimeout = timeout + } +} + +// ProcessResolveOptions converts an array of [ResolveOption] into a [ResolveOptions] object. +func ProcessResolveOptions(opts []ResolveOption) ResolveOptions { + resolveOptions := DefaultResolveOptions() + for _, option := range opts { + option(&resolveOptions) + } + return resolveOptions } // Publisher is an object capable of publishing particular names. type Publisher interface { - // Publish establishes a name-value mapping. - // TODO make this not PrivKey specific. - Publish(ctx context.Context, name ci.PrivKey, value path.Path, options ...opts.PublishOption) error + // Publish publishes the given value under the name represented by the given private key. + Publish(ctx context.Context, sk ci.PrivKey, value path.Path, options ...PublishOption) error +} + +// PublishOptions specifies options for publishing an IPNS Record. +type PublishOptions struct { + // EOL defines for how long the published value is valid. + EOL time.Time + + // TTL defines for how long the published value is cached locally before checking for updates. + TTL time.Duration + + // IPNSOptions are options passed by [IPNSPublisher] to [ipns.NewRecord] when + // creating a new record to publish. With this options, you can further customize + // the way IPNS Records are created. + IPNSOptions []ipns.Option +} + +// DefaultPublishOptions returns the default options for publishing an IPNS Record. +func DefaultPublishOptions() PublishOptions { + return PublishOptions{ + EOL: time.Now().Add(ipns.DefaultRecordLifetime), + TTL: ipns.DefaultRecordTTL, + } +} + +// PublishOption is used to set an option for [PublishOptions]. +type PublishOption func(*PublishOptions) + +// PublishWithEOL sets [PublishOptions.EOL]. +func PublishWithEOL(eol time.Time) PublishOption { + return func(o *PublishOptions) { + o.EOL = eol + } +} + +// PublishWithEOL sets [PublishOptions.TTL]. 
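// The sketches below are illustrative only and are not part of this patch: they show how
// the functional resolve options above are typically combined when calling a [Resolver].
// The helper names, the example path, and the concrete option values are assumptions made
// for the example; anything not overridden falls back to DefaultResolveOptions.
func resolveWithOptionsSketch(ctx context.Context, r Resolver) (Result, error) {
	// The path reuses a DNSLink name that already appears in the test fixtures.
	p, err := path.NewPath("/ipns/en.wikipedia-on-ipfs.org")
	if err != nil {
		return Result{}, err
	}
	// Bound the recursion depth and the DHT lookup; Result carries the resolved
	// path, its TTL (0 when unknown) and the last-modified timestamp.
	return r.Resolve(ctx, p,
		ResolveWithDepth(4),
		ResolveWithDhtRecordCount(DefaultResolverDhtRecordCount),
		ResolveWithDhtTimeout(30*time.Second),
	)
}

// resolveAsyncSketch, also illustrative, drains ResolveAsync and keeps the last successful
// result; per the interface contract, each emitted AsyncResult is at least as good
// (usually newer) than the previous one.
func resolveAsyncSketch(ctx context.Context, r Resolver, p path.Path) (AsyncResult, error) {
	var last AsyncResult
	for res := range r.ResolveAsync(ctx, p) {
		if res.Err != nil {
			return AsyncResult{}, res.Err
		}
		last = res
	}
	return last, nil
}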
+func PublishWithTTL(ttl time.Duration) PublishOption { + return func(o *PublishOptions) { + o.TTL = ttl + } +} + +// PublishWithIPNSOption adds an [ipns.Option] to [PublishOptions.IPNSOptions]. +// These options are used by [IPNSPublisher], which passes them onto the IPNS +// record creation at [ipns.NewRecord] +func PublishWithIPNSOption(option ipns.Option) PublishOption { + return func(o *PublishOptions) { + o.IPNSOptions = append(o.IPNSOptions, option) + } +} + +// ProcessPublishOptions converts an array of [PublishOption] into a [PublishOptions] object. +func ProcessPublishOptions(opts []PublishOption) PublishOptions { + publishOptions := DefaultPublishOptions() + for _, option := range opts { + option(&publishOptions) + } + return publishOptions } diff --git a/namesys/ipns_publisher.go b/namesys/ipns_publisher.go new file mode 100644 index 000000000..74ef35853 --- /dev/null +++ b/namesys/ipns_publisher.go @@ -0,0 +1,283 @@ +package namesys + +import ( + "context" + "errors" + "strings" + "sync" + "time" + + "github.com/ipfs/boxo/ipns" + "github.com/ipfs/boxo/path" + ds "github.com/ipfs/go-datastore" + dsquery "github.com/ipfs/go-datastore/query" + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/routing" + "github.com/whyrusleeping/base32" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// IPNSPublisher implements [Publisher] for IPNS Records. +type IPNSPublisher struct { + routing routing.ValueStore + ds ds.Datastore + + // Used to ensure we assign IPNS records sequential sequence numbers. + mu sync.Mutex +} + +var _ Publisher = &IPNSPublisher{} + +// NewIPNSResolver constructs a new [IPNSResolver] from a [routing.ValueStore] and +// a [ds.Datastore]. +func NewIPNSPublisher(route routing.ValueStore, ds ds.Datastore) *IPNSPublisher { + if ds == nil { + panic("nil datastore") + } + + return &IPNSPublisher{routing: route, ds: ds} +} + +func (p *IPNSPublisher) Publish(ctx context.Context, priv crypto.PrivKey, value path.Path, options ...PublishOption) error { + log.Debugf("Publish %s", value) + + ctx, span := startSpan(ctx, "IPNSPublisher.Publish", trace.WithAttributes(attribute.String("Value", value.String()))) + defer span.End() + + record, err := p.updateRecord(ctx, priv, value, options...) + if err != nil { + return err + } + + return PublishIPNSRecord(ctx, p.routing, priv.GetPublic(), record) +} + +// IpnsDsKey returns a datastore key given an IPNS identifier (peer +// ID). Defines the storage key for IPNS records in the local datastore. +func IpnsDsKey(name ipns.Name) ds.Key { + return ds.NewKey("/ipns/" + base32.RawStdEncoding.EncodeToString([]byte(name.Peer()))) +} + +// ListPublished returns the latest IPNS records published by this node and +// their expiration times. +// +// This method will not search the routing system for records published by other +// nodes. +func (p *IPNSPublisher) ListPublished(ctx context.Context) (map[ipns.Name]*ipns.Record, error) { + query, err := p.ds.Query(ctx, dsquery.Query{ + Prefix: ipns.NamespacePrefix, + }) + if err != nil { + return nil, err + } + defer query.Close() + + records := make(map[ipns.Name]*ipns.Record) + for { + select { + case result, ok := <-query.Next(): + if !ok { + return records, nil + } + if result.Error != nil { + return nil, result.Error + } + rec, err := ipns.UnmarshalRecord(result.Value) + if err != nil { + // Might as well return what we can. 
+ log.Error("found an invalid IPNS entry:", err) + continue + } + if !strings.HasPrefix(result.Key, ipns.NamespacePrefix) { + log.Errorf("datastore query for keys with prefix %s returned a key: %s", ipns.NamespacePrefix, result.Key) + continue + } + k := result.Key[len(ipns.NamespacePrefix):] + pid, err := base32.RawStdEncoding.DecodeString(k) + if err != nil { + log.Errorf("ipns ds key invalid: %s", result.Key) + continue + } + records[ipns.NameFromPeer(peer.ID(pid))] = rec + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +// GetPublished returns the record this node has published corresponding to the +// given peer ID. +// +// If `checkRouting` is true and we have no existing record, this method will +// check the routing system for any existing records. +func (p *IPNSPublisher) GetPublished(ctx context.Context, name ipns.Name, checkRouting bool) (*ipns.Record, error) { + ctx, cancel := context.WithTimeout(ctx, time.Second*30) + defer cancel() + + value, err := p.ds.Get(ctx, IpnsDsKey(name)) + switch err { + case nil: + case ds.ErrNotFound: + if !checkRouting { + return nil, nil + } + routingKey := name.RoutingKey() + value, err = p.routing.GetValue(ctx, string(routingKey)) + if err != nil { + // Not found or other network issue. Can't really do + // anything about this case. + if err != routing.ErrNotFound { + log.Debugf("error when determining the last published IPNS record for %s: %s", name, err) + } + + return nil, nil + } + default: + return nil, err + } + + return ipns.UnmarshalRecord(value) +} + +func (p *IPNSPublisher) updateRecord(ctx context.Context, k crypto.PrivKey, value path.Path, options ...PublishOption) (*ipns.Record, error) { + id, err := peer.IDFromPrivateKey(k) + if err != nil { + return nil, err + } + name := ipns.NameFromPeer(id) + + p.mu.Lock() + defer p.mu.Unlock() + + // get previous records sequence number + rec, err := p.GetPublished(ctx, name, true) + if err != nil { + return nil, err + } + + seq := uint64(0) + if rec != nil { + seq, err = rec.Sequence() + if err != nil { + return nil, err + } + + p, err := rec.Value() + if err != nil { + return nil, err + } + if value.String() != p.String() { + // Don't bother incrementing the sequence number unless the + // value changes. + seq++ + } + } + + opts := ProcessPublishOptions(options) + + // Create record + r, err := ipns.NewRecord(k, value, seq, opts.EOL, opts.TTL, opts.IPNSOptions...) + if err != nil { + return nil, err + } + + data, err := ipns.MarshalRecord(r) + if err != nil { + return nil, err + } + + // Put the new record. + dsKey := IpnsDsKey(name) + if err := p.ds.Put(ctx, dsKey, data); err != nil { + return nil, err + } + if err := p.ds.Sync(ctx, dsKey); err != nil { + return nil, err + } + + return r, nil +} + +// PublishIPNSRecord publishes the given [ipns.Record] for the provided [crypto.PubKey] in +// the provided [routing.ValueStore]. The public key is also made available to the routing +// system if it cannot be derived from the corresponding [peer.ID]. 
+func PublishIPNSRecord(ctx context.Context, r routing.ValueStore, pubKey crypto.PubKey, rec *ipns.Record) error { + ctx, span := startSpan(ctx, "PublishIPNSRecord") + defer span.End() + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + errs := make(chan error, 2) // At most two errors (IPNS, and public key) + + pid, err := peer.IDFromPublicKey(pubKey) + if err != nil { + return err + } + + go func() { + errs <- PutIPNSRecord(ctx, r, ipns.NameFromPeer(pid), rec) + }() + + // Publish the public key if the public key cannot be extracted from the peer ID. + // This is most likely not necessary since IPNS Records include, by default, the public + // key in those cases. However, this ensures it's still possible to easily retrieve + // the public key if, for some reason, it is not embedded. + if _, err := pid.ExtractPublicKey(); errors.Is(err, peer.ErrNoPublicKey) { + go func() { + errs <- PutPublicKey(ctx, r, pid, pubKey) + }() + + if err := waitOnErrChan(ctx, errs); err != nil { + return err + } + } + + return waitOnErrChan(ctx, errs) +} + +func waitOnErrChan(ctx context.Context, errs chan error) error { + select { + case err := <-errs: + return err + case <-ctx.Done(): + return ctx.Err() + } +} + +// PutPublicKey puts the given [crypto.PubKey] for the given [peer.ID] in the [routing.ValueStore]. +func PutPublicKey(ctx context.Context, r routing.ValueStore, pid peer.ID, pubKey crypto.PubKey) error { + routingKey := PkRoutingKey(pid) + ctx, span := startSpan(ctx, "PutPublicKey", trace.WithAttributes(attribute.String("Key", routingKey))) + defer span.End() + + bytes, err := crypto.MarshalPublicKey(pubKey) + if err != nil { + return err + } + + log.Debugf("Storing public key at: %x", routingKey) + return r.PutValue(ctx, routingKey, bytes) +} + +// PkRoutingKey returns the public key routing key for the given [peer.ID]. +func PkRoutingKey(id peer.ID) string { + return "/pk/" + string(id) +} + +// PutIPNSRecord puts the given [ipns.Record] for the given [ipns.Name] in the [routing.ValueStore]. 
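// The following sketch is illustrative only and not part of this patch: it shows how a
// record can be created and pushed straight to a routing system with PublishIPNSRecord,
// bypassing the datastore-backed sequence handling in IPNSPublisher. The helper name and
// the sequence, lifetime, and TTL values are assumptions made for the example.
func publishRecordSketch(ctx context.Context, vs routing.ValueStore, sk crypto.PrivKey, value path.Path) error {
	// Sequence 0 is only appropriate for a first publish; IPNSPublisher.Publish
	// bumps the sequence number automatically and is usually the better entry point.
	rec, err := ipns.NewRecord(sk, value, 0, time.Now().Add(ipns.DefaultRecordLifetime), ipns.DefaultRecordTTL)
	if err != nil {
		return err
	}
	// The public key is additionally published when it cannot be derived from the
	// peer ID (for example, with RSA keys).
	return PublishIPNSRecord(ctx, vs, sk.GetPublic(), rec)
}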
+func PutIPNSRecord(ctx context.Context, r routing.ValueStore, name ipns.Name, rec *ipns.Record) error { + routingKey := string(name.RoutingKey()) + ctx, span := startSpan(ctx, "PutIPNSRecord", trace.WithAttributes(attribute.String("IPNSKey", routingKey))) + defer span.End() + + bytes, err := ipns.MarshalRecord(rec) + if err != nil { + return err + } + + log.Debugf("Storing ipns record at: %x", routingKey) + return r.PutValue(ctx, routingKey, bytes) +} diff --git a/namesys/ipns_publisher_test.go b/namesys/ipns_publisher_test.go new file mode 100644 index 000000000..a783ddd61 --- /dev/null +++ b/namesys/ipns_publisher_test.go @@ -0,0 +1,121 @@ +package namesys + +import ( + "context" + "crypto/rand" + "testing" + "time" + + "github.com/ipfs/boxo/path" + "github.com/stretchr/testify/require" + + dshelp "github.com/ipfs/boxo/datastore/dshelp" + "github.com/ipfs/boxo/ipns" + mockrouting "github.com/ipfs/boxo/routing/mock" + ds "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" + testutil "github.com/libp2p/go-libp2p-testing/net" + ci "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" +) + +func TestIPNSPublisher(t *testing.T) { + t.Parallel() + + test := func(t *testing.T, keyType int, expectedErr error, expectedExistence bool) { + ctx := context.Background() + + // Create test identity + privKey, pubKey, err := ci.GenerateKeyPairWithReader(keyType, 2048, rand.Reader) + require.NoError(t, err) + + pid, err := peer.IDFromPublicKey(pubKey) + require.NoError(t, err) + + // Create IPNS Record + value, err := path.NewPath("/ipfs/bafkreifjjcie6lypi6ny7amxnfftagclbuxndqonfipmb64f2km2devei4") + require.NoError(t, err) + rec, err := ipns.NewRecord(privKey, value, 0, time.Now().Add(24*time.Hour), 0) + require.NoError(t, err) + + // Routing value store + dstore := dssync.MutexWrap(ds.NewMapDatastore()) + serv := mockrouting.NewServer() + r := serv.ClientWithDatastore(context.Background(), testutil.NewIdentity(pid, testutil.ZeroLocalTCPAddress, privKey, pubKey), dstore) + + // Publish IPNS Record + err = PublishIPNSRecord(ctx, r, pubKey, rec) + require.NoError(t, err) + + // Check if IPNS Record is stored in value store + _, err = r.GetValue(ctx, string(ipns.NameFromPeer(pid).RoutingKey())) + require.NoError(t, err) + + key := dshelp.NewKeyFromBinary(ipns.NameFromPeer(pid).RoutingKey()) + exists, err := dstore.Has(ctx, key) + require.NoError(t, err) + require.True(t, exists) + + // Check for Public Key is stored in value store + pkRoutingKey := PkRoutingKey(pid) + _, err = r.GetValue(ctx, pkRoutingKey) + require.ErrorIs(t, err, expectedErr) + + // Check if Public Key is in data store for completeness + key = dshelp.NewKeyFromBinary([]byte(pkRoutingKey)) + exists, err = dstore.Has(ctx, key) + require.NoError(t, err) + require.Equal(t, expectedExistence, exists) + } + + t.Run("RSA", func(t *testing.T) { + t.Parallel() + test(t, ci.RSA, nil, true) + }) + + t.Run("Ed22519", func(t *testing.T) { + t.Parallel() + test(t, ci.Ed25519, ds.ErrNotFound, false) + }) +} + +func TestAsyncDS(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rt := mockrouting.NewServer().Client(testutil.RandIdentityOrFatal(t)) + ds := &checkSyncDS{ + Datastore: ds.NewMapDatastore(), + syncKeys: make(map[ds.Key]struct{}), + } + publisher := NewIPNSPublisher(rt, ds) + + ipnsFakeID := testutil.RandIdentityOrFatal(t) + ipnsVal, err := path.NewPath("/ipns/foo.bar") + require.NoError(t, err) + + err = publisher.Publish(ctx, 
ipnsFakeID.PrivateKey(), ipnsVal) + require.NoError(t, err) + + ipnsKey := IpnsDsKey(ipns.NameFromPeer(ipnsFakeID.ID())) + + for k := range ds.syncKeys { + if k.IsAncestorOf(ipnsKey) || k.Equal(ipnsKey) { + return + } + } + + t.Fatal("ipns key not synced") +} + +type checkSyncDS struct { + ds.Datastore + syncKeys map[ds.Key]struct{} +} + +func (d *checkSyncDS) Sync(ctx context.Context, prefix ds.Key) error { + d.syncKeys[prefix] = struct{}{} + return d.Datastore.Sync(ctx, prefix) +} diff --git a/namesys/ipns_resolver.go b/namesys/ipns_resolver.go new file mode 100644 index 000000000..5efcf8785 --- /dev/null +++ b/namesys/ipns_resolver.go @@ -0,0 +1,158 @@ +package namesys + +import ( + "context" + "fmt" + "time" + + "github.com/ipfs/boxo/ipns" + "github.com/ipfs/boxo/path" + + dht "github.com/libp2p/go-libp2p-kad-dht" + "github.com/libp2p/go-libp2p/core/routing" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// IPNSResolver implements [Resolver] for IPNS Records. This resolver always returns +// a TTL if the record is still valid. It happens as follows: +// +// 1. Provisory TTL is chosen: record TTL if it exists, otherwise `ipns.DefaultRecordTTL`. +// 2. If provisory TTL expires before EOL, then returned TTL is duration between EOL and now. +// 3. If record is expired, 0 is returned as TTL. +type IPNSResolver struct { + routing routing.ValueStore +} + +var _ Resolver = &IPNSResolver{} + +// NewIPNSResolver constructs a new [IPNSResolver] from a [routing.ValueStore]. +func NewIPNSResolver(route routing.ValueStore) *IPNSResolver { + if route == nil { + panic("attempt to create resolver with nil routing system") + } + + return &IPNSResolver{ + routing: route, + } +} + +func (r *IPNSResolver) Resolve(ctx context.Context, p path.Path, options ...ResolveOption) (Result, error) { + ctx, span := startSpan(ctx, "IPNSResolver.Resolve", trace.WithAttributes(attribute.Stringer("Path", p))) + defer span.End() + + return resolve(ctx, r, p, ProcessResolveOptions(options)) +} + +func (r *IPNSResolver) ResolveAsync(ctx context.Context, p path.Path, options ...ResolveOption) <-chan AsyncResult { + ctx, span := startSpan(ctx, "IPNSResolver.ResolveAsync", trace.WithAttributes(attribute.Stringer("Path", p))) + defer span.End() + + return resolveAsync(ctx, r, p, ProcessResolveOptions(options)) +} + +func (r *IPNSResolver) resolveOnceAsync(ctx context.Context, p path.Path, options ResolveOptions) <-chan AsyncResult { + ctx, span := startSpan(ctx, "IPNSResolver.ResolveOnceAsync", trace.WithAttributes(attribute.Stringer("Path", p))) + defer span.End() + + out := make(chan AsyncResult, 1) + if p.Namespace() != path.IPNSNamespace { + out <- AsyncResult{Err: fmt.Errorf("unsupported namespace: %s", p.Namespace())} + close(out) + return out + } + + cancel := func() {} + if options.DhtTimeout != 0 { + // Resolution must complete within the timeout + ctx, cancel = context.WithTimeout(ctx, options.DhtTimeout) + } + + name, err := ipns.NameFromString(p.Segments()[1]) + if err != nil { + out <- AsyncResult{Err: err} + close(out) + cancel() + return out + } + + vals, err := r.routing.SearchValue(ctx, string(name.RoutingKey()), dht.Quorum(int(options.DhtRecordCount))) + if err != nil { + out <- AsyncResult{Err: err} + close(out) + cancel() + return out + } + + go func() { + defer cancel() + defer close(out) + ctx, span := startSpan(ctx, "IPNSResolver.ResolveOnceAsync.Worker") + defer span.End() + + for { + select { + case val, ok := <-vals: + if !ok { + return + } + + rec, err := 
ipns.UnmarshalRecord(val) + if err != nil { + emitOnceResult(ctx, out, AsyncResult{Err: err}) + return + } + + resolvedBase, err := rec.Value() + if err != nil { + emitOnceResult(ctx, out, AsyncResult{Err: err}) + return + } + + resolvedBase, err = joinPaths(resolvedBase, p) + if err != nil { + emitOnceResult(ctx, out, AsyncResult{Err: err}) + return + } + + ttl, err := calculateBestTTL(rec) + if err != nil { + emitOnceResult(ctx, out, AsyncResult{Err: err}) + return + } + + // TODO: in the future it would be interesting to set the last modified date + // as the date in which the record has been signed. + emitOnceResult(ctx, out, AsyncResult{Path: resolvedBase, TTL: ttl, LastMod: time.Now()}) + case <-ctx.Done(): + return + } + } + }() + + return out +} + +func calculateBestTTL(rec *ipns.Record) (time.Duration, error) { + ttl := DefaultResolverCacheTTL + if recordTTL, err := rec.TTL(); err == nil { + ttl = recordTTL + } + + switch eol, err := rec.Validity(); err { + case ipns.ErrUnrecognizedValidity: + // No EOL. + case nil: + ttEol := time.Until(eol) + if ttEol < 0 { + // It *was* valid when we first resolved it. + ttl = 0 + } else if ttEol < ttl { + ttl = ttEol + } + default: + return 0, err + } + + return ttl, nil +} diff --git a/namesys/ipns_resolver_test.go b/namesys/ipns_resolver_test.go new file mode 100644 index 000000000..c7dcd3b97 --- /dev/null +++ b/namesys/ipns_resolver_test.go @@ -0,0 +1,131 @@ +package namesys + +import ( + "context" + "testing" + "time" + + ipns "github.com/ipfs/boxo/ipns" + "github.com/ipfs/boxo/path" + "github.com/ipfs/boxo/routing/offline" + "github.com/ipfs/go-cid" + ds "github.com/ipfs/go-datastore" + dssync "github.com/ipfs/go-datastore/sync" + record "github.com/libp2p/go-libp2p-record" + tnet "github.com/libp2p/go-libp2p-testing/net" + "github.com/libp2p/go-libp2p/core/routing" + "github.com/stretchr/testify/require" +) + +type noFailValidator struct{} + +func (v noFailValidator) Validate(key string, value []byte) error { + return nil +} + +func (v noFailValidator) Select(key string, values [][]byte) (int, error) { + return 0, nil +} + +func TestResolver(t *testing.T) { + t.Parallel() + + pathCat := path.FromCid(cid.MustParse("bafkqabddmf2au")) + pathDog := path.FromCid(cid.MustParse("bafkqabden5tqu")) + + makeResolverDependencies := func() (tnet.Identity, ipns.Name, ds.Datastore, routing.ValueStore) { + ds := dssync.MutexWrap(ds.NewMapDatastore()) + id := tnet.RandIdentityOrFatal(t) + r := offline.NewOfflineRouter(ds, record.NamespacedValidator{ + "ipns": ipns.Validator{}, // No need for KeyBook, as records created by NameSys include PublicKey for RSA. + "pk": record.PublicKeyValidator{}, + }) + + return id, ipns.NameFromPeer(id.ID()), ds, r + } + + t.Run("Publish and resolve", func(t *testing.T) { + t.Parallel() + + id, name, ds, r := makeResolverDependencies() + resolver := NewIPNSResolver(r) + publisher := NewIPNSPublisher(r, ds) + + err := publisher.Publish(context.Background(), id.PrivateKey(), pathCat) + require.NoError(t, err) + + res, err := resolver.Resolve(context.Background(), name.AsPath()) + require.NoError(t, err) + require.Equal(t, pathCat, res.Path) + }) + + t.Run("Resolve does not return expired record", func(t *testing.T) { + t.Parallel() + + id, name, ds, r := makeResolverDependencies() + resolver := NewIPNSResolver(r) + + // Create a "bad" publisher that allows to publish expired records. 
+ publisher := NewIPNSPublisher(offline.NewOfflineRouter(ds, record.NamespacedValidator{ + "ipns": noFailValidator{}, + "pk": record.PublicKeyValidator{}, + }), ds) + + // Publish expired. + eol := time.Now().Add(time.Hour * -1) + err := publisher.Publish(context.Background(), id.PrivateKey(), pathCat, PublishWithEOL(eol)) + require.NoError(t, err) + + // Expect to not be able to resolve. + _, err = resolver.Resolve(context.Background(), name.AsPath()) + require.ErrorIs(t, err, ErrResolveFailed) + }) + + t.Run("Resolve prefers non-expired record", func(t *testing.T) { + t.Parallel() + + id, name, ds, r := makeResolverDependencies() + resolver := NewIPNSResolver(r) + + // Create a "bad" publisher that allows to publish expired records. + publisher := NewIPNSPublisher(offline.NewOfflineRouter(ds, record.NamespacedValidator{ + "ipns": noFailValidator{}, + "pk": record.PublicKeyValidator{}, + }), ds) + + // Publish expired. + eol := time.Now().Add(time.Hour * -1) + err := publisher.Publish(context.Background(), id.PrivateKey(), pathCat, PublishWithEOL(eol)) + require.NoError(t, err) + + // Publish new. + err = publisher.Publish(context.Background(), id.PrivateKey(), pathDog) + require.NoError(t, err) + + // Expect new. + res, err := resolver.Resolve(context.Background(), name.AsPath()) + require.NoError(t, err) + require.Equal(t, pathDog, res.Path) + }) + + t.Run("Resolve prefers newer record", func(t *testing.T) { + t.Parallel() + + id, name, ds, r := makeResolverDependencies() + resolver := NewIPNSResolver(r) + publisher := NewIPNSPublisher(r, ds) + + // Publish one... + err := publisher.Publish(context.Background(), id.PrivateKey(), pathCat, PublishWithEOL(time.Now().Add(time.Hour*2))) + require.NoError(t, err) + + // Publish two... + err = publisher.Publish(context.Background(), id.PrivateKey(), pathDog, PublishWithEOL(time.Now().Add(time.Hour*5))) + require.NoError(t, err) + + // Should receive newer! + res, err := resolver.Resolve(context.Background(), name.AsPath()) + require.NoError(t, err) + require.Equal(t, pathDog, res.Path) + }) +} diff --git a/namesys/namesys.go b/namesys/namesys.go index df4403570..00b1f4d2d 100644 --- a/namesys/namesys.go +++ b/namesys/namesys.go @@ -20,7 +20,7 @@ import ( "time" lru "github.com/hashicorp/golang-lru/v2" - opts "github.com/ipfs/boxo/coreiface/options/namesys" + "github.com/ipfs/boxo/ipns" "github.com/ipfs/boxo/path" "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" @@ -32,35 +32,38 @@ import ( madns "github.com/multiformats/go-multiaddr-dns" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" + "go.uber.org/multierr" ) -// mpns (a multi-protocol NameSystem) implements generic IPFS naming. +// namesys is a multi-protocol [NameSystem] that implements generic IPFS naming. +// It uses several [Resolver]s: // -// Uses several Resolvers: -// (a) IPFS routing naming: SFS-like PKI names. -// (b) dns domains: resolves using links in DNS TXT records +// 1. IPFS routing naming: SFS-like PKI names. +// 2. dns domains: resolves using links in DNS TXT records // -// It can only publish to: (a) IPFS routing naming. -type mpns struct { +// It can only publish to: 1. IPFS routing naming. 
+type namesys struct { ds ds.Datastore dnsResolver, ipnsResolver resolver ipnsPublisher Publisher - staticMap map[string]path.Path - cache *lru.Cache[string, any] + staticMap map[string]*cacheEntry + cache *lru.Cache[string, cacheEntry] } -type Option func(*mpns) error +var _ NameSystem = &namesys{} + +type Option func(*namesys) error // WithCache is an option that instructs the name system to use a (LRU) cache of the given size. func WithCache(size int) Option { - return func(ns *mpns) error { + return func(ns *namesys) error { if size <= 0 { return fmt.Errorf("invalid cache size %d; must be > 0", size) } - cache, err := lru.New[string, any](size) + cache, err := lru.New[string, cacheEntry](size) if err != nil { return err } @@ -70,42 +73,46 @@ func WithCache(size int) Option { } } -// WithDNSResolver is an option that supplies a custom DNS resolver to use instead of the system -// default. +// WithDNSResolver is an option that supplies a custom DNS resolver to use instead +// of the system default. func WithDNSResolver(rslv madns.BasicResolver) Option { - return func(ns *mpns) error { + return func(ns *namesys) error { ns.dnsResolver = NewDNSResolver(rslv.LookupTXT) return nil } } -// WithDatastore is an option that supplies a datastore to use instead of an in-memory map datastore. The datastore is used to store published IPNS records and make them available for querying. +// WithDatastore is an option that supplies a datastore to use instead of an in-memory map datastore. +// The datastore is used to store published IPNS Records and make them available for querying. func WithDatastore(ds ds.Datastore) Option { - return func(ns *mpns) error { + return func(ns *namesys) error { ns.ds = ds return nil } } -// NewNameSystem will construct the IPFS naming system based on Routing +// NewNameSystem constructs an IPFS [NameSystem] based on the given [routing.ValueStore]. func NewNameSystem(r routing.ValueStore, opts ...Option) (NameSystem, error) { - var staticMap map[string]path.Path + var staticMap map[string]*cacheEntry // Prewarm namesys cache with static records for deterministic tests and debugging. // Useful for testing things like DNSLink without real DNS lookup. // Example: // IPFS_NS_MAP="dnslink-test.example.com:/ipfs/bafkreicysg23kiwv34eg2d7qweipxwosdo2py4ldv42nbauguluen5v6am" if list := os.Getenv("IPFS_NS_MAP"); list != "" { - staticMap = make(map[string]path.Path) + staticMap = make(map[string]*cacheEntry) for _, pair := range strings.Split(list, ",") { mapping := strings.SplitN(pair, ":", 2) key := mapping[0] - value := path.FromString(mapping[1]) - staticMap[key] = value + value, err := path.NewPath(mapping[1]) + if err != nil { + return nil, err + } + staticMap[ipns.NamespacePrefix+key] = &cacheEntry{val: value, ttl: 0} } } - ns := &mpns{ + ns := &namesys{ staticMap: staticMap, } @@ -124,149 +131,109 @@ func NewNameSystem(r routing.ValueStore, opts ...Option) (NameSystem, error) { ns.dnsResolver = NewDNSResolver(madns.DefaultResolver.LookupTXT) } - ns.ipnsResolver = NewIpnsResolver(r) - ns.ipnsPublisher = NewIpnsPublisher(r, ns.ds) + ns.ipnsResolver = NewIPNSResolver(r) + ns.ipnsPublisher = NewIPNSPublisher(r, ns.ds) return ns, nil } -// DefaultResolverCacheTTL defines max ttl of a record placed in namesys cache. -const DefaultResolverCacheTTL = time.Minute - // Resolve implements Resolver. 
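// An illustrative sketch, not part of this patch: wiring the options above into a working
// NameSystem. The datastore is supplied by the caller and the cache size is an arbitrary
// example value; as documented in NewNameSystem, IPFS_NS_MAP can additionally prewarm the
// cache with static mappings.
func newNameSystemSketch(vs routing.ValueStore, dstore ds.Datastore) (NameSystem, error) {
	return NewNameSystem(vs,
		WithDatastore(dstore), // persists published IPNS Records
		WithCache(128),        // LRU cache for resolved names
	)
}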
-func (ns *mpns) Resolve(ctx context.Context, name string, options ...opts.ResolveOpt) (path.Path, error) { - ctx, span := StartSpan(ctx, "MPNS.Resolve", trace.WithAttributes(attribute.String("Name", name))) +func (ns *namesys) Resolve(ctx context.Context, p path.Path, options ...ResolveOption) (Result, error) { + ctx, span := startSpan(ctx, "namesys.Resolve", trace.WithAttributes(attribute.Stringer("Path", p))) defer span.End() - if strings.HasPrefix(name, "/ipfs/") { - return path.ParsePath(name) - } - - if !strings.HasPrefix(name, "/") { - return path.ParsePath("/ipfs/" + name) - } - - return resolve(ctx, ns, name, opts.ProcessOpts(options)) + return resolve(ctx, ns, p, ProcessResolveOptions(options)) } -func (ns *mpns) ResolveAsync(ctx context.Context, name string, options ...opts.ResolveOpt) <-chan Result { - ctx, span := StartSpan(ctx, "MPNS.ResolveAsync", trace.WithAttributes(attribute.String("Name", name))) +func (ns *namesys) ResolveAsync(ctx context.Context, p path.Path, options ...ResolveOption) <-chan AsyncResult { + ctx, span := startSpan(ctx, "namesys.ResolveAsync", trace.WithAttributes(attribute.Stringer("Path", p))) defer span.End() - if strings.HasPrefix(name, "/ipfs/") { - p, err := path.ParsePath(name) - res := make(chan Result, 1) - res <- Result{p, err} - close(res) - return res - } - - if !strings.HasPrefix(name, "/") { - p, err := path.ParsePath("/ipfs/" + name) - res := make(chan Result, 1) - res <- Result{p, err} - close(res) - return res - } - - return resolveAsync(ctx, ns, name, opts.ProcessOpts(options)) + return resolveAsync(ctx, ns, p, ProcessResolveOptions(options)) } // resolveOnce implements resolver. -func (ns *mpns) resolveOnceAsync(ctx context.Context, name string, options opts.ResolveOpts) <-chan onceResult { - ctx, span := StartSpan(ctx, "MPNS.ResolveOnceAsync") +func (ns *namesys) resolveOnceAsync(ctx context.Context, p path.Path, options ResolveOptions) <-chan AsyncResult { + ctx, span := startSpan(ctx, "namesys.ResolveOnceAsync", trace.WithAttributes(attribute.Stringer("Path", p))) defer span.End() - out := make(chan onceResult, 1) - - if !strings.HasPrefix(name, ipnsPrefix) { - name = ipnsPrefix + name - } - segments := strings.SplitN(name, "/", 4) - if len(segments) < 3 || segments[0] != "" { - log.Debugf("invalid name syntax for %s", name) - out <- onceResult{err: ErrResolveFailed} + out := make(chan AsyncResult, 1) + if !p.Mutable() { + out <- AsyncResult{Path: p} close(out) return out } - key := segments[2] - - // Resolver selection: - // 1. if it is a PeerID/CID/multihash resolve through "ipns". - // 2. 
if it is a domain name, resolve through "dns" - - var res resolver - ipnsKey, err := peer.Decode(key) - // CIDs in IPNS are expected to have libp2p-key multicodec - // We ease the transition by returning a more meaningful error with a valid CID + segments := p.Segments() + resolvablePath, err := path.NewPathFromSegments(segments[0], segments[1]) if err != nil { - ipnsCid, cidErr := cid.Decode(key) - if cidErr == nil && ipnsCid.Version() == 1 && ipnsCid.Type() != cid.Libp2pKey { - fixedCid := cid.NewCidV1(cid.Libp2pKey, ipnsCid.Hash()).String() - codecErr := fmt.Errorf("peer ID represented as CIDv1 require libp2p-key multicodec: retry with /ipns/%s", fixedCid) - log.Debugf("RoutingResolver: could not convert public key hash %q to peer ID: %s\n", key, codecErr) - out <- onceResult{err: codecErr} - close(out) - return out - } - } - - cacheKey := key - if err == nil { - cacheKey = string(ipnsKey) + out <- AsyncResult{Err: err} + close(out) + return out } - if p, ok := ns.cacheGet(cacheKey); ok { - var err error - if len(segments) > 3 { - p, err = path.FromSegments("", strings.TrimRight(p.String(), "/"), segments[3]) - } + if resolvedBase, ttl, lastMod, ok := ns.cacheGet(resolvablePath.String()); ok { + p, err = joinPaths(resolvedBase, p) span.SetAttributes(attribute.Bool("CacheHit", true)) span.RecordError(err) - - out <- onceResult{value: p, err: err} + out <- AsyncResult{Path: p, TTL: ttl, LastMod: lastMod, Err: err} close(out) return out + } else { + span.SetAttributes(attribute.Bool("CacheHit", false)) } - span.SetAttributes(attribute.Bool("CacheHit", false)) - if err == nil { + // Resolver selection: + // 1. If it is an IPNS Name, resolve through IPNS. + // 2. if it is a domain name, resolve through DNSLink. + + var res resolver + if _, err := ipns.NameFromString(segments[1]); err == nil { res = ns.ipnsResolver - } else if _, ok := dns.IsDomainName(key); ok { + } else if _, ok := dns.IsDomainName(segments[1]); ok { res = ns.dnsResolver } else { - out <- onceResult{err: fmt.Errorf("invalid IPNS root: %q", key)} + // CIDs in IPNS are expected to have libp2p-key multicodec + // We ease the transition by returning a more meaningful error with a valid CID + ipnsCid, cidErr := cid.Decode(segments[1]) + if cidErr == nil && ipnsCid.Version() == 1 && ipnsCid.Type() != cid.Libp2pKey { + fixedCid := cid.NewCidV1(cid.Libp2pKey, ipnsCid.Hash()).String() + codecErr := fmt.Errorf("peer ID represented as CIDv1 require libp2p-key multicodec: retry with /ipns/%s", fixedCid) + log.Debugf("RoutingResolver: could not convert public key hash %q to peer ID: %s\n", segments[1], codecErr) + out <- AsyncResult{Err: codecErr} + } else { + out <- AsyncResult{Err: fmt.Errorf("cannot resolve: %q", resolvablePath.String())} + } + close(out) return out } - resCh := res.resolveOnceAsync(ctx, key, options) - var best onceResult + resCh := res.resolveOnceAsync(ctx, resolvablePath, options) + var best AsyncResult go func() { defer close(out) for { select { case res, ok := <-resCh: if !ok { - if best != (onceResult{}) { - ns.cacheSet(cacheKey, best.value, best.ttl) + if best != (AsyncResult{}) { + ns.cacheSet(resolvablePath.String(), best.Path, best.TTL, best.LastMod) } return } - if res.err == nil { + + if res.Err == nil { best = res } - p := res.value - err := res.err - ttl := res.ttl - // Attach rest of the path - if len(segments) > 3 { - p, err = path.FromSegments("", strings.TrimRight(p.String(), "/"), segments[3]) + p, err := joinPaths(res.Path, p) + if err != nil { + // res.Err may already be defined, so just combine 
them + res.Err = multierr.Combine(err, res.Err) } - emitOnceResult(ctx, out, onceResult{value: p, ttl: ttl, err: err}) + emitOnceResult(ctx, out, AsyncResult{Path: p, TTL: res.TTL, LastMod: res.LastMod, Err: res.Err}) case <-ctx.Done(): return } @@ -276,7 +243,7 @@ func (ns *mpns) resolveOnceAsync(ctx context.Context, name string, options opts. return out } -func emitOnceResult(ctx context.Context, outCh chan<- onceResult, r onceResult) { +func emitOnceResult(ctx context.Context, outCh chan<- AsyncResult, r AsyncResult) { select { case outCh <- r: case <-ctx.Done(): @@ -284,30 +251,35 @@ func emitOnceResult(ctx context.Context, outCh chan<- onceResult, r onceResult) } // Publish implements Publisher -func (ns *mpns) Publish(ctx context.Context, name ci.PrivKey, value path.Path, options ...opts.PublishOption) error { - ctx, span := StartSpan(ctx, "MPNS.Publish") +func (ns *namesys) Publish(ctx context.Context, name ci.PrivKey, value path.Path, options ...PublishOption) error { + ctx, span := startSpan(ctx, "namesys.Publish") defer span.End() // This is a bit hacky. We do this because the EOL is based on the current // time, but also needed in the end of the function. Therefore, we parse // the options immediately and add an option PublishWithEOL with the EOL // calculated in this moment. - publishOpts := opts.ProcessPublishOptions(options) - options = append(options, opts.PublishWithEOL(publishOpts.EOL)) + publishOpts := ProcessPublishOptions(options) + options = append(options, PublishWithEOL(publishOpts.EOL)) - id, err := peer.IDFromPrivateKey(name) + pid, err := peer.IDFromPrivateKey(name) if err != nil { span.RecordError(err) return err } - span.SetAttributes(attribute.String("ID", id.String())) + + ipnsName := ipns.NameFromPeer(pid) + cacheKey := ipnsName.String() + + span.SetAttributes(attribute.String("ID", pid.String())) if err := ns.ipnsPublisher.Publish(ctx, name, value, options...); err != nil { // Invalidate the cache. Publishing may _partially_ succeed but // still return an error. - ns.cacheInvalidate(string(id)) + ns.cacheInvalidate(cacheKey) span.RecordError(err) return err } + ttl := DefaultResolverCacheTTL if publishOpts.TTL >= 0 { ttl = publishOpts.TTL @@ -315,6 +287,20 @@ func (ns *mpns) Publish(ctx context.Context, name ci.PrivKey, value path.Path, o if ttEOL := time.Until(publishOpts.EOL); ttEOL < ttl { ttl = ttEOL } - ns.cacheSet(string(id), value, ttl) + ns.cacheSet(cacheKey, value, ttl, time.Now()) return nil } + +// Resolve is an utility function that takes a [NameSystem] and a [path.Path], and +// returns the result of [NameSystem.Resolve] for the given path. If the given namesys +// is nil, [ErrNoNamesys] is returned. 
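// An illustrative sketch, not part of this patch: publishing through the NameSystem with an
// explicit lifetime and TTL. The durations are arbitrary example values. On success the
// result is cached locally for min(TTL, time remaining until EOL), as implemented above.
func publishWithOptionsSketch(ctx context.Context, ns NameSystem, sk ci.PrivKey, value path.Path) error {
	return ns.Publish(ctx, sk, value,
		PublishWithEOL(time.Now().Add(48*time.Hour)),
		PublishWithTTL(time.Hour),
	)
}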
+func Resolve(ctx context.Context, ns NameSystem, p path.Path) (Result, error) { + ctx, span := startSpan(ctx, "Resolve", trace.WithAttributes(attribute.Stringer("Path", p))) + defer span.End() + + if ns == nil { + return Result{}, ErrNoNamesys + } + + return ns.Resolve(ctx, p) +} diff --git a/namesys/namesys_cache.go b/namesys/namesys_cache.go new file mode 100644 index 000000000..fc8842e3b --- /dev/null +++ b/namesys/namesys_cache.go @@ -0,0 +1,78 @@ +package namesys + +import ( + "time" + + "github.com/ipfs/boxo/path" +) + +type cacheEntry struct { + val path.Path // is the value of this entry + ttl time.Duration // is the ttl of this entry + lastMod time.Time // is the last time this entry was modified + cacheEOL time.Time // is until when we keep this entry in cache +} + +func (ns *namesys) cacheGet(name string) (path.Path, time.Duration, time.Time, bool) { + // existence of optional mapping defined via IPFS_NS_MAP is checked first + if ns.staticMap != nil { + entry, ok := ns.staticMap[name] + if ok { + return entry.val, entry.ttl, entry.lastMod, true + } + } + + if ns.cache == nil { + return nil, 0, time.Now(), false + } + + entry, ok := ns.cache.Get(name) + if !ok { + return nil, 0, time.Now(), false + } + + if time.Now().Before(entry.cacheEOL) { + return entry.val, entry.ttl, entry.lastMod, true + } + + // We do not delete the entry from the cache. Removals are handled by the + // backing cache system. It is useful to keep it since cacheSet can use + // previously existing values to heuristically update a cache entry. + return nil, 0, time.Now(), false +} + +func (ns *namesys) cacheSet(name string, val path.Path, ttl time.Duration, lastMod time.Time) { + if ns.cache == nil || ttl <= 0 { + return + } + + // Set the current date if there's no lastMod. + if lastMod.IsZero() { + lastMod = time.Now() + } + + // If there's an already cached version with the same path, but + // different lastMod date, keep the oldest. + entry, ok := ns.cache.Get(name) + if ok && entry.val.String() == val.String() { + if lastMod.After(entry.lastMod) { + lastMod = entry.lastMod + } + } + + // Add automatically evicts previous entry, so it works for updating. 
+ ns.cache.Add(name, cacheEntry{ + val: val, + ttl: ttl, + lastMod: lastMod, + cacheEOL: time.Now().Add(ttl), + }) +} + +func (ns *namesys) cacheInvalidate(name string) { + if ns.cache == nil { + return + } + + ns.cache.Remove(name) +} diff --git a/namesys/namesys_test.go b/namesys/namesys_test.go index 52fce6794..41fa0ce88 100644 --- a/namesys/namesys_test.go +++ b/namesys/namesys_test.go @@ -2,12 +2,9 @@ package namesys import ( "context" - "errors" - "fmt" "testing" "time" - opts "github.com/ipfs/boxo/coreiface/options/namesys" "github.com/ipfs/boxo/ipns" "github.com/ipfs/boxo/path" offroute "github.com/ipfs/boxo/routing/offline" @@ -16,32 +13,33 @@ import ( record "github.com/libp2p/go-libp2p-record" ci "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem" + "github.com/stretchr/testify/require" ) type mockResolver struct { entries map[string]string } -func testResolution(t *testing.T, resolver Resolver, name string, depth uint, expected string, expError error) { +func testResolution(t *testing.T, resolver Resolver, name string, depth uint, expected string, expectedTTL time.Duration, expectedError error) { t.Helper() - p, err := resolver.Resolve(context.Background(), name, opts.Depth(depth)) - if !errors.Is(err, expError) { - t.Fatal(fmt.Errorf( - "expected %s with a depth of %d to have a '%s' error, but got '%s'", - name, depth, expError, err)) - } - if p.String() != expected { - t.Fatal(fmt.Errorf( - "%s with depth %d resolved to %s != %s", - name, depth, p.String(), expected)) + + ptr, err := path.NewPath(name) + require.NoError(t, err) + + res, err := resolver.Resolve(context.Background(), ptr, ResolveWithDepth(depth)) + require.ErrorIs(t, err, expectedError) + require.Equal(t, expectedTTL, res.TTL) + if expected == "" { + require.Nil(t, res.Path, "%s with depth %d", name, depth) + } else { + require.Equal(t, expected, res.Path.String(), "%s with depth %d", name, depth) } } -func (r *mockResolver) resolveOnceAsync(ctx context.Context, name string, options opts.ResolveOpts) <-chan onceResult { - p, err := path.ParsePath(r.entries[name]) - out := make(chan onceResult, 1) - out <- onceResult{value: p, err: err} +func (r *mockResolver) resolveOnceAsync(ctx context.Context, p path.Path, options ResolveOptions) <-chan AsyncResult { + p, err := path.NewPath(r.entries[p.String()]) + out := make(chan AsyncResult, 1) + out <- AsyncResult{Path: p, Err: err} close(out) return out } @@ -49,12 +47,12 @@ func (r *mockResolver) resolveOnceAsync(ctx context.Context, name string, option func mockResolverOne() *mockResolver { return &mockResolver{ entries: map[string]string{ - "QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy": "/ipfs/Qmcqtw8FfrVSBaRmbWwHxt3AuySBhJLcvmFYi3Lbc4xnwj", - "QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n": "/ipns/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy", - "QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD": "/ipns/ipfs.io", - "QmQ4QZh8nrsczdUEwTyfBope4THUhqxqc1fx6qYhhzZQei": "/ipfs/QmP3ouCnU8NNLsW6261pAx2pNLV2E4dQoisB1sgda12Act", - "12D3KooWFB51PRY9BxcXSH6khFXw1BZeszeLDy7C8GciskqCTZn5": "/ipns/QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n", // ed25519+identity multihash - "bafzbeickencdqw37dpz3ha36ewrh4undfjt2do52chtcky4rxkj447qhdm": "/ipns/QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n", // cidv1 in base32 with libp2p-key multicodec + "/ipns/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy": "/ipfs/Qmcqtw8FfrVSBaRmbWwHxt3AuySBhJLcvmFYi3Lbc4xnwj", + 
"/ipns/QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n": "/ipns/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy", + "/ipns/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD": "/ipns/ipfs.io", + "/ipns/QmQ4QZh8nrsczdUEwTyfBope4THUhqxqc1fx6qYhhzZQei": "/ipfs/QmP3ouCnU8NNLsW6261pAx2pNLV2E4dQoisB1sgda12Act", + "/ipns/12D3KooWFB51PRY9BxcXSH6khFXw1BZeszeLDy7C8GciskqCTZn5": "/ipns/QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n", // ed25519+identity multihash + "/ipns/bafzbeickencdqw37dpz3ha36ewrh4undfjt2do52chtcky4rxkj447qhdm": "/ipns/QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n", // cidv1 in base32 with libp2p-key multicodec }, } } @@ -62,123 +60,106 @@ func mockResolverOne() *mockResolver { func mockResolverTwo() *mockResolver { return &mockResolver{ entries: map[string]string{ - "ipfs.io": "/ipns/QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n", + "/ipns/ipfs.io": "/ipns/QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n", }, } } func TestNamesysResolution(t *testing.T) { - r := &mpns{ + r := &namesys{ ipnsResolver: mockResolverOne(), dnsResolver: mockResolverTwo(), } - testResolution(t, r, "Qmcqtw8FfrVSBaRmbWwHxt3AuySBhJLcvmFYi3Lbc4xnwj", opts.DefaultDepthLimit, "/ipfs/Qmcqtw8FfrVSBaRmbWwHxt3AuySBhJLcvmFYi3Lbc4xnwj", nil) - testResolution(t, r, "/ipns/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy", opts.DefaultDepthLimit, "/ipfs/Qmcqtw8FfrVSBaRmbWwHxt3AuySBhJLcvmFYi3Lbc4xnwj", nil) - testResolution(t, r, "/ipns/QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n", opts.DefaultDepthLimit, "/ipfs/Qmcqtw8FfrVSBaRmbWwHxt3AuySBhJLcvmFYi3Lbc4xnwj", nil) - testResolution(t, r, "/ipns/QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n", 1, "/ipns/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy", ErrResolveRecursion) - testResolution(t, r, "/ipns/ipfs.io", opts.DefaultDepthLimit, "/ipfs/Qmcqtw8FfrVSBaRmbWwHxt3AuySBhJLcvmFYi3Lbc4xnwj", nil) - testResolution(t, r, "/ipns/ipfs.io", 1, "/ipns/QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n", ErrResolveRecursion) - testResolution(t, r, "/ipns/ipfs.io", 2, "/ipns/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy", ErrResolveRecursion) - testResolution(t, r, "/ipns/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", opts.DefaultDepthLimit, "/ipfs/Qmcqtw8FfrVSBaRmbWwHxt3AuySBhJLcvmFYi3Lbc4xnwj", nil) - testResolution(t, r, "/ipns/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", 1, "/ipns/ipfs.io", ErrResolveRecursion) - testResolution(t, r, "/ipns/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", 2, "/ipns/QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n", ErrResolveRecursion) - testResolution(t, r, "/ipns/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", 3, "/ipns/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy", ErrResolveRecursion) - testResolution(t, r, "/ipns/12D3KooWFB51PRY9BxcXSH6khFXw1BZeszeLDy7C8GciskqCTZn5", 1, "/ipns/QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n", ErrResolveRecursion) - testResolution(t, r, "/ipns/bafzbeickencdqw37dpz3ha36ewrh4undfjt2do52chtcky4rxkj447qhdm", 1, "/ipns/QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n", ErrResolveRecursion) + for _, testCase := range []struct { + name string + depth uint + expectedPath string + expectedTTL time.Duration + expectedError error + }{ + {"/ipfs/Qmcqtw8FfrVSBaRmbWwHxt3AuySBhJLcvmFYi3Lbc4xnwj", DefaultDepthLimit, "/ipfs/Qmcqtw8FfrVSBaRmbWwHxt3AuySBhJLcvmFYi3Lbc4xnwj", 0, nil}, + {"/ipns/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy", DefaultDepthLimit, "/ipfs/Qmcqtw8FfrVSBaRmbWwHxt3AuySBhJLcvmFYi3Lbc4xnwj", 0, nil}, + {"/ipns/QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n", DefaultDepthLimit, 
"/ipfs/Qmcqtw8FfrVSBaRmbWwHxt3AuySBhJLcvmFYi3Lbc4xnwj", 0, nil}, + {"/ipns/QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n", 1, "/ipns/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy", 0, ErrResolveRecursion}, + {"/ipns/ipfs.io", DefaultDepthLimit, "/ipfs/Qmcqtw8FfrVSBaRmbWwHxt3AuySBhJLcvmFYi3Lbc4xnwj", 0, nil}, + {"/ipns/ipfs.io", 1, "/ipns/QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n", 0, ErrResolveRecursion}, + {"/ipns/ipfs.io", 2, "/ipns/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy", 0, ErrResolveRecursion}, + {"/ipns/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", DefaultDepthLimit, "/ipfs/Qmcqtw8FfrVSBaRmbWwHxt3AuySBhJLcvmFYi3Lbc4xnwj", 0, nil}, + {"/ipns/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", 1, "/ipns/ipfs.io", 0, ErrResolveRecursion}, + {"/ipns/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", 2, "/ipns/QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n", 0, ErrResolveRecursion}, + {"/ipns/QmY3hE8xgFCjGcz6PHgnvJz5HZi1BaKRfPkn1ghZUcYMjD", 3, "/ipns/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy", 0, ErrResolveRecursion}, + {"/ipns/12D3KooWFB51PRY9BxcXSH6khFXw1BZeszeLDy7C8GciskqCTZn5", 1, "/ipns/QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n", 0, ErrResolveRecursion}, + {"/ipns/bafzbeickencdqw37dpz3ha36ewrh4undfjt2do52chtcky4rxkj447qhdm", 1, "/ipns/QmbCMUZw6JFeZ7Wp9jkzbye3Fzp2GGcPgC3nmeUjfVF87n", 0, ErrResolveRecursion}, + } { + t.Run(testCase.name, func(t *testing.T) { + testResolution(t, r, testCase.name, (testCase.depth), testCase.expectedPath, 0, testCase.expectedError) + }) + } +} + +func TestResolveIPNS(t *testing.T) { + ns := &namesys{ + ipnsResolver: mockResolverOne(), + dnsResolver: mockResolverTwo(), + } + + inputPath, err := path.NewPath("/ipns/QmatmE9msSfkKxoffpHwNLNKgwZG8eT9Bud6YoPab52vpy/a/b/c") + require.NoError(t, err) + + res, err := Resolve(context.Background(), ns, inputPath) + require.NoError(t, err) + require.Equal(t, "/ipfs/Qmcqtw8FfrVSBaRmbWwHxt3AuySBhJLcvmFYi3Lbc4xnwj/a/b/c", res.Path.String()) } func TestPublishWithCache0(t *testing.T) { dst := dssync.MutexWrap(ds.NewMapDatastore()) - priv, _, err := ci.GenerateKeyPair(ci.RSA, 2048) - if err != nil { - t.Fatal(err) - } - ps, err := pstoremem.NewPeerstore() - if err != nil { - t.Fatal(err) - } - pid, err := peer.IDFromPrivateKey(priv) - if err != nil { - t.Fatal(err) - } - err = ps.AddPrivKey(pid, priv) - if err != nil { - t.Fatal(err) - } + priv, _, err := ci.GenerateKeyPair(ci.RSA, 4096) + require.NoError(t, err) routing := offroute.NewOfflineRouter(dst, record.NamespacedValidator{ - "ipns": ipns.Validator{KeyBook: ps}, + "ipns": ipns.Validator{}, // No need for KeyBook, as records created by NameSys include PublicKey for RSA. "pk": record.PublicKeyValidator{}, }) nsys, err := NewNameSystem(routing, WithDatastore(dst)) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // CID is arbitrary. 
- p, err := path.ParsePath("QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn") - if err != nil { - t.Fatal(err) - } + p, err := path.NewPath("/ipfs/QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn") + require.NoError(t, err) + err = nsys.Publish(context.Background(), priv, p) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) } func TestPublishWithTTL(t *testing.T) { dst := dssync.MutexWrap(ds.NewMapDatastore()) priv, _, err := ci.GenerateKeyPair(ci.RSA, 2048) - if err != nil { - t.Fatal(err) - } - ps, err := pstoremem.NewPeerstore() - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) + pid, err := peer.IDFromPrivateKey(priv) - if err != nil { - t.Fatal(err) - } - err = ps.AddPrivKey(pid, priv) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) routing := offroute.NewOfflineRouter(dst, record.NamespacedValidator{ - "ipns": ipns.Validator{KeyBook: ps}, + "ipns": ipns.Validator{}, // No need for KeyBook, as records created by NameSys include PublicKey for RSA. "pk": record.PublicKeyValidator{}, }) - nsys, err := NewNameSystem(routing, WithDatastore(dst), WithCache(128)) - if err != nil { - t.Fatal(err) - } + ns, err := NewNameSystem(routing, WithDatastore(dst), WithCache(128)) + require.NoError(t, err) // CID is arbitrary. - p, err := path.ParsePath("QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn") - if err != nil { - t.Fatal(err) - } + p, err := path.NewPath("/ipfs/QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn") + require.NoError(t, err) ttl := 1 * time.Second eol := time.Now().Add(2 * time.Second) - err = nsys.Publish(context.Background(), priv, p, opts.PublishWithEOL(eol), opts.PublishWithTTL(ttl)) - if err != nil { - t.Fatal(err) - } - ientry, ok := nsys.(*mpns).cache.Get(string(pid)) - if !ok { - t.Fatal("cache get failed") - } - entry, ok := ientry.(cacheEntry) - if !ok { - t.Fatal("bad cache item returned") - } - if entry.eol.Sub(eol) > 10*time.Millisecond { - t.Fatalf("bad cache ttl: expected %s, got %s", eol, entry.eol) - } + err = ns.Publish(context.Background(), priv, p, PublishWithEOL(eol), PublishWithTTL(ttl)) + require.NoError(t, err) + + entry, ok := ns.(*namesys).cache.Get(ipns.NameFromPeer(pid).String()) + require.True(t, ok) + require.LessOrEqual(t, entry.cacheEOL.Sub(eol), 10*time.Millisecond) } diff --git a/namesys/publisher.go b/namesys/publisher.go deleted file mode 100644 index c913b0bbc..000000000 --- a/namesys/publisher.go +++ /dev/null @@ -1,287 +0,0 @@ -package namesys - -import ( - "context" - "strings" - "sync" - "time" - - opts "github.com/ipfs/boxo/coreiface/options/namesys" - "github.com/ipfs/boxo/ipns" - "github.com/ipfs/boxo/path" - ds "github.com/ipfs/go-datastore" - dsquery "github.com/ipfs/go-datastore/query" - "github.com/libp2p/go-libp2p/core/crypto" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/core/routing" - "github.com/whyrusleeping/base32" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -const ipnsPrefix = "/ipns/" - -// IpnsPublisher is capable of publishing and resolving names to the IPFS -// routing system. -type IpnsPublisher struct { - routing routing.ValueStore - ds ds.Datastore - - // Used to ensure we assign IPNS records *sequential* sequence numbers. - mu sync.Mutex -} - -// NewIpnsPublisher constructs a publisher for the IPFS Routing name system. 
-func NewIpnsPublisher(route routing.ValueStore, ds ds.Datastore) *IpnsPublisher { - if ds == nil { - panic("nil datastore") - } - return &IpnsPublisher{routing: route, ds: ds} -} - -// Publish implements Publisher. Accepts a keypair and a value, -// and publishes it out to the routing system -func (p *IpnsPublisher) Publish(ctx context.Context, k crypto.PrivKey, value path.Path, options ...opts.PublishOption) error { - log.Debugf("Publish %s", value) - - ctx, span := StartSpan(ctx, "IpnsPublisher.Publish", trace.WithAttributes(attribute.String("Value", value.String()))) - defer span.End() - - record, err := p.updateRecord(ctx, k, value, options...) - if err != nil { - return err - } - - return PutRecordToRouting(ctx, p.routing, k.GetPublic(), record) -} - -// IpnsDsKey returns a datastore key given an IPNS identifier (peer -// ID). Defines the storage key for IPNS records in the local datastore. -func IpnsDsKey(id peer.ID) ds.Key { - return ds.NewKey("/ipns/" + base32.RawStdEncoding.EncodeToString([]byte(id))) -} - -// ListPublished returns the latest IPNS records published by this node and -// their expiration times. -// -// This method will not search the routing system for records published by other -// nodes. -func (p *IpnsPublisher) ListPublished(ctx context.Context) (map[peer.ID]*ipns.Record, error) { - query, err := p.ds.Query(ctx, dsquery.Query{ - Prefix: ipnsPrefix, - }) - if err != nil { - return nil, err - } - defer query.Close() - - records := make(map[peer.ID]*ipns.Record) - for { - select { - case result, ok := <-query.Next(): - if !ok { - return records, nil - } - if result.Error != nil { - return nil, result.Error - } - rec, err := ipns.UnmarshalRecord(result.Value) - if err != nil { - // Might as well return what we can. - log.Error("found an invalid IPNS entry:", err) - continue - } - if !strings.HasPrefix(result.Key, ipnsPrefix) { - log.Errorf("datastore query for keys with prefix %s returned a key: %s", ipnsPrefix, result.Key) - continue - } - k := result.Key[len(ipnsPrefix):] - pid, err := base32.RawStdEncoding.DecodeString(k) - if err != nil { - log.Errorf("ipns ds key invalid: %s", result.Key) - continue - } - records[peer.ID(pid)] = rec - case <-ctx.Done(): - return nil, ctx.Err() - } - } -} - -// GetPublished returns the record this node has published corresponding to the -// given peer ID. -// -// If `checkRouting` is true and we have no existing record, this method will -// check the routing system for any existing records. -func (p *IpnsPublisher) GetPublished(ctx context.Context, id peer.ID, checkRouting bool) (*ipns.Record, error) { - ctx, cancel := context.WithTimeout(ctx, time.Second*30) - defer cancel() - - value, err := p.ds.Get(ctx, IpnsDsKey(id)) - switch err { - case nil: - case ds.ErrNotFound: - if !checkRouting { - return nil, nil - } - ipnskey := string(ipns.NameFromPeer(id).RoutingKey()) - value, err = p.routing.GetValue(ctx, ipnskey) - if err != nil { - // Not found or other network issue. Can't really do - // anything about this case. 
- if err != routing.ErrNotFound { - log.Debugf("error when determining the last published IPNS record for %s: %s", id, err) - } - - return nil, nil - } - default: - return nil, err - } - - return ipns.UnmarshalRecord(value) -} - -func (p *IpnsPublisher) updateRecord(ctx context.Context, k crypto.PrivKey, value path.Path, options ...opts.PublishOption) (*ipns.Record, error) { - id, err := peer.IDFromPrivateKey(k) - if err != nil { - return nil, err - } - - p.mu.Lock() - defer p.mu.Unlock() - - // get previous records sequence number - rec, err := p.GetPublished(ctx, id, true) - if err != nil { - return nil, err - } - - seqno := uint64(0) - if rec != nil { - seqno, err = rec.Sequence() - if err != nil { - return nil, err - } - - p, err := rec.Value() - if err != nil { - return nil, err - } - if value != path.Path(p.String()) { - // Don't bother incrementing the sequence number unless the - // value changes. - seqno++ - } - } - - opts := opts.ProcessPublishOptions(options) - - // Create record - r, err := ipns.NewRecord(k, value, seqno, opts.EOL, opts.TTL, ipns.WithV1Compatibility(opts.CompatibleWithV1)) - if err != nil { - return nil, err - } - - data, err := ipns.MarshalRecord(r) - if err != nil { - return nil, err - } - - // Put the new record. - key := IpnsDsKey(id) - if err := p.ds.Put(ctx, key, data); err != nil { - return nil, err - } - if err := p.ds.Sync(ctx, key); err != nil { - return nil, err - } - return r, nil -} - -// PutRecordToRouting publishes the given entry using the provided ValueStore, -// keyed on the ID associated with the provided public key. The public key is -// also made available to the routing system so that entries can be verified. -func PutRecordToRouting(ctx context.Context, r routing.ValueStore, k crypto.PubKey, rec *ipns.Record) error { - ctx, span := StartSpan(ctx, "PutRecordToRouting") - defer span.End() - - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - errs := make(chan error, 2) // At most two errors (IPNS, and public key) - - id, err := peer.IDFromPublicKey(k) - if err != nil { - return err - } - - go func() { - errs <- PublishEntry(ctx, r, string(ipns.NameFromPeer(id).RoutingKey()), rec) - }() - - // Publish the public key if a public key cannot be extracted from the ID - // TODO: once v0.4.16 is widespread enough, we can stop doing this - // and at that point we can even deprecate the /pk/ namespace in the dht - // - // NOTE: This check actually checks if the public key has been embedded - // in the IPNS entry. This check is sufficient because we embed the - // public key in the IPNS entry if it can't be extracted from the ID. - if _, err := rec.PubKey(); err == nil { - go func() { - errs <- PublishPublicKey(ctx, r, PkKeyForID(id), k) - }() - - if err := waitOnErrChan(ctx, errs); err != nil { - return err - } - } - - return waitOnErrChan(ctx, errs) -} - -func waitOnErrChan(ctx context.Context, errs chan error) error { - select { - case err := <-errs: - return err - case <-ctx.Done(): - return ctx.Err() - } -} - -// PublishPublicKey stores the given public key in the ValueStore with the -// given key. 
-func PublishPublicKey(ctx context.Context, r routing.ValueStore, k string, pubk crypto.PubKey) error { - ctx, span := StartSpan(ctx, "PublishPublicKey", trace.WithAttributes(attribute.String("Key", k))) - defer span.End() - - log.Debugf("Storing pubkey at: %s", k) - pkbytes, err := crypto.MarshalPublicKey(pubk) - if err != nil { - return err - } - - // Store associated public key - return r.PutValue(ctx, k, pkbytes) -} - -// PublishEntry stores the given IpnsEntry in the ValueStore with the given -// ipnskey. -func PublishEntry(ctx context.Context, r routing.ValueStore, ipnskey string, rec *ipns.Record) error { - ctx, span := StartSpan(ctx, "PublishEntry", trace.WithAttributes(attribute.String("IPNSKey", ipnskey))) - defer span.End() - - data, err := ipns.MarshalRecord(rec) - if err != nil { - return err - } - - log.Debugf("Storing ipns entry at: %x", ipnskey) - // Store ipns entry at "/ipns/"+h(pubkey) - return r.PutValue(ctx, ipnskey, data) -} - -// PkKeyForID returns the public key routing key for the given peer ID. -func PkKeyForID(id peer.ID) string { - return "/pk/" + string(id) -} diff --git a/namesys/publisher_test.go b/namesys/publisher_test.go deleted file mode 100644 index ad975f59a..000000000 --- a/namesys/publisher_test.go +++ /dev/null @@ -1,156 +0,0 @@ -package namesys - -import ( - "context" - "crypto/rand" - "testing" - "time" - - "github.com/ipfs/boxo/path" - - dshelp "github.com/ipfs/boxo/datastore/dshelp" - "github.com/ipfs/boxo/ipns" - mockrouting "github.com/ipfs/boxo/routing/mock" - ds "github.com/ipfs/go-datastore" - dssync "github.com/ipfs/go-datastore/sync" - testutil "github.com/libp2p/go-libp2p-testing/net" - ci "github.com/libp2p/go-libp2p/core/crypto" - "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" -) - -type identity struct { - testutil.PeerNetParams -} - -func (p *identity) ID() peer.ID { - return p.PeerNetParams.ID -} - -func (p *identity) Address() ma.Multiaddr { - return p.Addr -} - -func (p *identity) PrivateKey() ci.PrivKey { - return p.PrivKey -} - -func (p *identity) PublicKey() ci.PubKey { - return p.PubKey -} - -func testNamekeyPublisher(t *testing.T, keyType int, expectedErr error, expectedExistence bool) { - // Context - ctx := context.Background() - - // Private key - privKey, pubKey, err := ci.GenerateKeyPairWithReader(keyType, 2048, rand.Reader) - if err != nil { - t.Fatal(err) - } - - // ID - id, err := peer.IDFromPublicKey(pubKey) - if err != nil { - t.Fatal(err) - } - - // Value - value := path.Path("ipfs/TESTING") - - // Seqnum - seqnum := uint64(0) - - // Eol - eol := time.Now().Add(24 * time.Hour) - - // Routing value store - p := testutil.PeerNetParams{ - ID: id, - PrivKey: privKey, - PubKey: pubKey, - Addr: testutil.ZeroLocalTCPAddress, - } - - dstore := dssync.MutexWrap(ds.NewMapDatastore()) - serv := mockrouting.NewServer() - r := serv.ClientWithDatastore(context.Background(), &identity{p}, dstore) - - rec, err := ipns.NewRecord(privKey, value, seqnum, eol, 0) - if err != nil { - t.Fatal(err) - } - - err = PutRecordToRouting(ctx, r, pubKey, rec) - if err != nil { - t.Fatal(err) - } - - // Check for namekey existence in value store - namekey := PkKeyForID(id) - _, err = r.GetValue(ctx, namekey) - if err != expectedErr { - t.Fatal(err) - } - - // Also check datastore for completeness - key := dshelp.NewKeyFromBinary([]byte(namekey)) - exists, err := dstore.Has(ctx, key) - if err != nil { - t.Fatal(err) - } - - if exists != expectedExistence { - t.Fatal("Unexpected key existence in datastore") - } 
-} - -func TestRSAPublisher(t *testing.T) { - testNamekeyPublisher(t, ci.RSA, nil, true) -} - -func TestEd22519Publisher(t *testing.T) { - testNamekeyPublisher(t, ci.Ed25519, ds.ErrNotFound, false) -} - -func TestAsyncDS(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - rt := mockrouting.NewServer().Client(testutil.RandIdentityOrFatal(t)) - ds := &checkSyncDS{ - Datastore: ds.NewMapDatastore(), - syncKeys: make(map[ds.Key]struct{}), - } - publisher := NewIpnsPublisher(rt, ds) - - ipnsFakeID := testutil.RandIdentityOrFatal(t) - ipnsVal, err := path.ParsePath("/ipns/foo.bar") - if err != nil { - t.Fatal(err) - } - - if err := publisher.Publish(ctx, ipnsFakeID.PrivateKey(), ipnsVal); err != nil { - t.Fatal(err) - } - - ipnsKey := IpnsDsKey(ipnsFakeID.ID()) - - for k := range ds.syncKeys { - if k.IsAncestorOf(ipnsKey) || k.Equal(ipnsKey) { - return - } - } - - t.Fatal("ipns key not synced") -} - -type checkSyncDS struct { - ds.Datastore - syncKeys map[ds.Key]struct{} -} - -func (d *checkSyncDS) Sync(ctx context.Context, prefix ds.Key) error { - d.syncKeys[prefix] = struct{}{} - return d.Datastore.Sync(ctx, prefix) -} diff --git a/namesys/republisher/repub.go b/namesys/republisher/repub.go index 87200ff5c..347f4cf5a 100644 --- a/namesys/republisher/repub.go +++ b/namesys/republisher/repub.go @@ -5,14 +5,15 @@ package republisher import ( "context" "errors" + "fmt" "time" - keystore "github.com/ipfs/boxo/keystore" + "github.com/ipfs/boxo/keystore" "github.com/ipfs/boxo/namesys" - "github.com/ipfs/boxo/path" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" - opts "github.com/ipfs/boxo/coreiface/options/namesys" "github.com/ipfs/boxo/ipns" ds "github.com/ipfs/go-datastore" logging "github.com/ipfs/go-log/v2" @@ -22,24 +23,27 @@ import ( "github.com/libp2p/go-libp2p/core/peer" ) -var errNoEntry = errors.New("no previous entry") - -var log = logging.Logger("ipns-repub") +var ( + errNoEntry = errors.New("no previous entry") + log = logging.Logger("ipns/repub") +) -// DefaultRebroadcastInterval is the default interval at which we rebroadcast IPNS records -var DefaultRebroadcastInterval = time.Hour * 4 +const ( + // DefaultRebroadcastInterval is the default interval at which we rebroadcast IPNS records + DefaultRebroadcastInterval = time.Hour * 4 -// InitialRebroadcastDelay is the delay before first broadcasting IPNS records on start -var InitialRebroadcastDelay = time.Minute * 1 + // InitialRebroadcastDelay is the delay before first broadcasting IPNS records on start + InitialRebroadcastDelay = time.Minute * 1 -// FailureRetryInterval is the interval at which we retry IPNS records broadcasts (when they fail) -var FailureRetryInterval = time.Minute * 5 + // FailureRetryInterval is the interval at which we retry IPNS records broadcasts (when they fail) + FailureRetryInterval = time.Minute * 5 -// DefaultRecordLifetime is the default lifetime for IPNS records -const DefaultRecordLifetime = time.Hour * 24 + // DefaultRecordLifetime is the default lifetime for IPNS records + DefaultRecordLifetime = ipns.DefaultRecordLifetime +) // Republisher facilitates the regular publishing of all the IPNS records -// associated to keys in a Keystore. +// associated to keys in a [keystore.Keystore]. 
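
Since the `Republisher` constructor and `Run` entry point are touched below, a brief wiring sketch may help review. It assumes the caller already has a `namesys.Publisher`, a datastore, the node key, and a `keystore.Keystore`, and that `Run` is driven with `goprocess.Go` (the same goprocess mechanism the republisher itself uses); none of this is part of the patch.

```go
package example

import (
	"github.com/ipfs/boxo/keystore"
	"github.com/ipfs/boxo/namesys"
	"github.com/ipfs/boxo/namesys/republisher"
	ds "github.com/ipfs/go-datastore"
	goprocess "github.com/jbenet/goprocess"
	ic "github.com/libp2p/go-libp2p/core/crypto"
)

// startRepublisher wires a Republisher over an existing publisher, datastore,
// node key, and keystore, and starts it on a goprocess. Closing the returned
// process stops the republishing loop.
func startRepublisher(ns namesys.Publisher, dstore ds.Datastore, self ic.PrivKey, ks keystore.Keystore) goprocess.Process {
	rp := republisher.NewRepublisher(ns, dstore, self, ks)
	// Records are rebroadcast every DefaultRebroadcastInterval and republished
	// with DefaultRecordLifetime, which now tracks ipns.DefaultRecordLifetime.
	return goprocess.Go(rp.Run)
}
```
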
type Republisher struct { ns namesys.Publisher ds ds.Datastore @@ -52,7 +56,7 @@ type Republisher struct { RecordLifetime time.Duration } -// NewRepublisher creates a new Republisher +// NewRepublisher creates a new [Republisher] from the given options. func NewRepublisher(ns namesys.Publisher, ds ds.Datastore, self ic.PrivKey, ks keystore.Keystore) *Republisher { return &Republisher{ ns: ns, @@ -64,8 +68,7 @@ func NewRepublisher(ns namesys.Publisher, ds ds.Datastore, self ic.PrivKey, ks k } } -// Run starts the republisher facility. It can be stopped by stopping the -// provided proc. +// Run starts the republisher facility. It can be stopped by stopping the provided proc. func (rp *Republisher) Run(proc goprocess.Process) { timer := time.NewTimer(InitialRebroadcastDelay) defer timer.Stop() @@ -93,7 +96,7 @@ func (rp *Republisher) Run(proc goprocess.Process) { func (rp *Republisher) republishEntries(p goprocess.Process) error { ctx, cancel := context.WithCancel(gpctx.OnClosingContext(p)) defer cancel() - ctx, span := namesys.StartSpan(ctx, "Republisher.RepublishEntries") + ctx, span := startSpan(ctx, "Republisher.RepublishEntries") defer span.End() // TODO: Use rp.ipns.ListPublished(). We can't currently *do* that @@ -127,7 +130,7 @@ func (rp *Republisher) republishEntries(p goprocess.Process) error { } func (rp *Republisher) republishEntry(ctx context.Context, priv ic.PrivKey) error { - ctx, span := namesys.StartSpan(ctx, "Republisher.RepublishEntry") + ctx, span := startSpan(ctx, "Republisher.RepublishEntry") defer span.End() id, err := peer.IDFromPrivateKey(priv) if err != nil { @@ -138,7 +141,7 @@ func (rp *Republisher) republishEntry(ctx context.Context, priv ic.PrivKey) erro log.Debugf("republishing ipns entry for %s", id) // Look for it locally only - rec, err := rp.getLastIPNSRecord(ctx, id) + rec, err := rp.getLastIPNSRecord(ctx, ipns.NameFromPeer(id)) if err != nil { if err == errNoEntry { span.SetAttributes(attribute.Bool("NoEntry", true)) @@ -165,14 +168,14 @@ func (rp *Republisher) republishEntry(ctx context.Context, priv ic.PrivKey) erro if prevEol.After(eol) { eol = prevEol } - err = rp.ns.Publish(ctx, priv, path.Path(p.String()), opts.PublishWithEOL(eol)) + err = rp.ns.Publish(ctx, priv, p, namesys.PublishWithEOL(eol)) span.RecordError(err) return err } -func (rp *Republisher) getLastIPNSRecord(ctx context.Context, id peer.ID) (*ipns.Record, error) { +func (rp *Republisher) getLastIPNSRecord(ctx context.Context, name ipns.Name) (*ipns.Record, error) { // Look for it locally only - val, err := rp.ds.Get(ctx, namesys.IpnsDsKey(id)) + val, err := rp.ds.Get(ctx, namesys.IpnsDsKey(name)) switch err { case nil: case ds.ErrNotFound: @@ -183,3 +186,9 @@ func (rp *Republisher) getLastIPNSRecord(ctx context.Context, id peer.ID) (*ipns return ipns.UnmarshalRecord(val) } + +var tracer = otel.Tracer("boxo/namesys/republisher") + +func startSpan(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + return tracer.Start(ctx, fmt.Sprintf("Namesys.%s", name)) +} diff --git a/namesys/republisher/repub_test.go b/namesys/republisher/repub_test.go index d6c7b0d85..74297a41c 100644 --- a/namesys/republisher/repub_test.go +++ b/namesys/republisher/repub_test.go @@ -13,8 +13,8 @@ import ( host "github.com/libp2p/go-libp2p/core/host" peer "github.com/libp2p/go-libp2p/core/peer" routing "github.com/libp2p/go-libp2p/core/routing" + "github.com/stretchr/testify/require" - opts "github.com/ipfs/boxo/coreiface/options/namesys" "github.com/ipfs/boxo/ipns" 
"github.com/ipfs/boxo/path" ds "github.com/ipfs/go-datastore" @@ -27,7 +27,7 @@ import ( type mockNode struct { h host.Host - id string + id peer.ID privKey ic.PrivKey store ds.Batching dht *dht.IpfsDHT @@ -47,13 +47,11 @@ func getMockNode(t *testing.T, ctx context.Context) *mockNode { return rt, err }), ) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) return &mockNode{ h: h, - id: h.ID().Pretty(), + id: h.ID(), privKey: h.Peerstore().PrivKey(h.ID()), store: dstore, dht: idht, @@ -72,9 +70,7 @@ func TestRepublish(t *testing.T) { for i := 0; i < 10; i++ { n := getMockNode(t, ctx) ns, err := namesys.NewNameSystem(n.dht, namesys.WithDatastore(n.store)) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) nsystems = append(nsystems, ns) nodes = append(nodes, n) @@ -83,17 +79,18 @@ func TestRepublish(t *testing.T) { pinfo := host.InfoFromHost(nodes[0].h) for _, n := range nodes[1:] { - if err := n.h.Connect(ctx, *pinfo); err != nil { - t.Fatal(err) - } + err := n.h.Connect(ctx, *pinfo) + require.NoError(t, err) } // have one node publish a record that is valid for 1 second publisher := nodes[3] - p := path.FromString("/ipfs/QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn") // does not need to be valid - rp := namesys.NewIpnsPublisher(publisher.dht, publisher.store) - name := "/ipns/" + publisher.id + p, err := path.NewPath("/ipfs/QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn") // does not need to be valid + require.NoError(t, err) + + rp := namesys.NewIPNSPublisher(publisher.dht, publisher.store) + name := ipns.NameFromPeer(publisher.id).AsPath() // Retry in case the record expires before we can fetch it. This can // happen when running the test on a slow machine. @@ -101,10 +98,8 @@ func TestRepublish(t *testing.T) { timeout := time.Second for { expiration = time.Now().Add(time.Second) - err := rp.Publish(ctx, publisher.privKey, p, opts.PublishWithEOL(expiration)) - if err != nil { - t.Fatal(err) - } + err := rp.Publish(ctx, publisher.privKey, p, namesys.PublishWithEOL(expiration)) + require.NoError(t, err) err = verifyResolution(nsystems, name, p) if err == nil { @@ -120,9 +115,8 @@ func TestRepublish(t *testing.T) { // Now wait a second, the records will be invalid and we should fail to resolve time.Sleep(timeout) - if err := verifyResolutionFails(nsystems, name); err != nil { - t.Fatal(err) - } + err = verifyResolutionFails(nsystems, name) + require.NoError(t, err) // The republishers that are contained within the nodes have their timeout set // to 12 hours. 
Instead of trying to tweak those, we're just going to pretend @@ -138,9 +132,8 @@ func TestRepublish(t *testing.T) { time.Sleep(time.Second * 2) // we should be able to resolve them now - if err := verifyResolution(nsystems, name, p); err != nil { - t.Fatal(err) - } + err = verifyResolution(nsystems, name, p) + require.NoError(t, err) } func TestLongEOLRepublish(t *testing.T) { @@ -154,9 +147,7 @@ func TestLongEOLRepublish(t *testing.T) { for i := 0; i < 10; i++ { n := getMockNode(t, ctx) ns, err := namesys.NewNameSystem(n.dht, namesys.WithDatastore(n.store)) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) nsystems = append(nsystems, ns) nodes = append(nodes, n) @@ -165,27 +156,24 @@ func TestLongEOLRepublish(t *testing.T) { pinfo := host.InfoFromHost(nodes[0].h) for _, n := range nodes[1:] { - if err := n.h.Connect(ctx, *pinfo); err != nil { - t.Fatal(err) - } + err := n.h.Connect(ctx, *pinfo) + require.NoError(t, err) } // have one node publish a record that is valid for 1 second publisher := nodes[3] - p := path.FromString("/ipfs/QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn") // does not need to be valid - rp := namesys.NewIpnsPublisher(publisher.dht, publisher.store) - name := "/ipns/" + publisher.id + p, err := path.NewPath("/ipfs/QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn") + require.NoError(t, err) + + rp := namesys.NewIPNSPublisher(publisher.dht, publisher.store) + name := ipns.NameFromPeer(publisher.id).AsPath() expiration := time.Now().Add(time.Hour) - err := rp.Publish(ctx, publisher.privKey, p, opts.PublishWithEOL(expiration)) - if err != nil { - t.Fatal(err) - } + err = rp.Publish(ctx, publisher.privKey, p, namesys.PublishWithEOL(expiration)) + require.NoError(t, err) err = verifyResolution(nsystems, name, p) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // The republishers that are contained within the nodes have their timeout set // to 12 hours. 
Instead of trying to tweak those, we're just going to pretend @@ -201,28 +189,19 @@ func TestLongEOLRepublish(t *testing.T) { time.Sleep(time.Second * 2) err = verifyResolution(nsystems, name, p) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - rec, err := getLastIPNSRecord(ctx, publisher.store, publisher.h.ID()) - if err != nil { - t.Fatal(err) - } + rec, err := getLastIPNSRecord(ctx, publisher.store, ipns.NameFromPeer(publisher.h.ID())) + require.NoError(t, err) finalEol, err := rec.Validity() - if err != nil { - t.Fatal(err) - } - - if !finalEol.Equal(expiration) { - t.Fatal("expiration time modified") - } + require.NoError(t, err) + require.Equal(t, expiration.UTC(), finalEol.UTC()) } -func getLastIPNSRecord(ctx context.Context, dstore ds.Datastore, id peer.ID) (*ipns.Record, error) { +func getLastIPNSRecord(ctx context.Context, dstore ds.Datastore, name ipns.Name) (*ipns.Record, error) { // Look for it locally only - val, err := dstore.Get(ctx, namesys.IpnsDsKey(id)) + val, err := dstore.Get(ctx, namesys.IpnsDsKey(name)) if err != nil { return nil, err } @@ -230,23 +209,23 @@ func getLastIPNSRecord(ctx context.Context, dstore ds.Datastore, id peer.ID) (*i return ipns.UnmarshalRecord(val) } -func verifyResolution(nsystems []namesys.NameSystem, key string, exp path.Path) error { +func verifyResolution(nsystems []namesys.NameSystem, key path.Path, exp path.Path) error { ctx, cancel := context.WithCancel(context.Background()) defer cancel() for _, n := range nsystems { - val, err := n.Resolve(ctx, key) + res, err := n.Resolve(ctx, key) if err != nil { return err } - if val != exp { + if res.Path.String() != exp.String() { return errors.New("resolved wrong record") } } return nil } -func verifyResolutionFails(nsystems []namesys.NameSystem, key string) error { +func verifyResolutionFails(nsystems []namesys.NameSystem, key path.Path) error { ctx, cancel := context.WithCancel(context.Background()) defer cancel() for _, n := range nsystems { diff --git a/namesys/resolve/resolve.go b/namesys/resolve/resolve.go deleted file mode 100644 index b2acf0602..000000000 --- a/namesys/resolve/resolve.go +++ /dev/null @@ -1,56 +0,0 @@ -package resolve - -import ( - "context" - "errors" - "fmt" - "strings" - - "github.com/ipfs/boxo/path" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - - "github.com/ipfs/boxo/namesys" -) - -// ErrNoNamesys is an explicit error for when an IPFS node doesn't -// (yet) have a name system -var ErrNoNamesys = errors.New( - "core/resolve: no Namesys on IpfsNode - can't resolve ipns entry") - -// ResolveIPNS resolves /ipns paths -func ResolveIPNS(ctx context.Context, nsys namesys.NameSystem, p path.Path) (path.Path, error) { - ctx, span := namesys.StartSpan(ctx, "ResolveIPNS", trace.WithAttributes(attribute.String("Path", p.String()))) - defer span.End() - if strings.HasPrefix(p.String(), "/ipns/") { - // TODO(cryptix): we should be able to query the local cache for the path - if nsys == nil { - return "", ErrNoNamesys - } - - seg := p.Segments() - - if len(seg) < 2 || seg[1] == "" { // just "/" without further segments - err := fmt.Errorf("invalid path %q: ipns path missing IPNS ID", p) - return "", err - } - - extensions := seg[2:] - resolvable, err := path.FromSegments("/", seg[0], seg[1]) - if err != nil { - return "", err - } - - respath, err := nsys.Resolve(ctx, resolvable.String()) - if err != nil { - return "", err - } - - segments := append(respath.Segments(), extensions...) - p, err = path.FromSegments("/", segments...) 
- if err != nil { - return "", err - } - } - return p, nil -} diff --git a/namesys/resolve_test.go b/namesys/resolve_test.go deleted file mode 100644 index 3aecdccaf..000000000 --- a/namesys/resolve_test.go +++ /dev/null @@ -1,122 +0,0 @@ -package namesys - -import ( - "context" - "errors" - "testing" - "time" - - ipns "github.com/ipfs/boxo/ipns" - path "github.com/ipfs/boxo/path" - mockrouting "github.com/ipfs/boxo/routing/mock" - ds "github.com/ipfs/go-datastore" - dssync "github.com/ipfs/go-datastore/sync" - tnet "github.com/libp2p/go-libp2p-testing/net" -) - -func TestRoutingResolve(t *testing.T) { - dstore := dssync.MutexWrap(ds.NewMapDatastore()) - serv := mockrouting.NewServer() - id := tnet.RandIdentityOrFatal(t) - d := serv.ClientWithDatastore(context.Background(), id, dstore) - - resolver := NewIpnsResolver(d) - publisher := NewIpnsPublisher(d, dstore) - - identity := tnet.RandIdentityOrFatal(t) - - h := path.FromString("/ipfs/QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN") - err := publisher.Publish(context.Background(), identity.PrivateKey(), h) - if err != nil { - t.Fatal(err) - } - - res, err := resolver.Resolve(context.Background(), identity.ID().Pretty()) - if err != nil { - t.Fatal(err) - } - - if res != h { - t.Fatal("Got back incorrect value.") - } -} - -func TestPrexistingExpiredRecord(t *testing.T) { - dstore := dssync.MutexWrap(ds.NewMapDatastore()) - d := mockrouting.NewServer().ClientWithDatastore(context.Background(), tnet.RandIdentityOrFatal(t), dstore) - - resolver := NewIpnsResolver(d) - publisher := NewIpnsPublisher(d, dstore) - - identity := tnet.RandIdentityOrFatal(t) - - // Make an expired record and put it in the datastore - h := path.FromString("/ipfs/QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN") - eol := time.Now().Add(time.Hour * -1) - - entry, err := ipns.NewRecord(identity.PrivateKey(), h, 0, eol, 0) - if err != nil { - t.Fatal(err) - } - err = PutRecordToRouting(context.Background(), d, identity.PublicKey(), entry) - if err != nil { - t.Fatal(err) - } - - // Now, with an old record in the system already, try and publish a new one - err = publisher.Publish(context.Background(), identity.PrivateKey(), h) - if err != nil { - t.Fatal(err) - } - - err = verifyCanResolve(resolver, identity.ID().Pretty(), h) - if err != nil { - t.Fatal(err) - } -} - -func TestPrexistingRecord(t *testing.T) { - dstore := dssync.MutexWrap(ds.NewMapDatastore()) - d := mockrouting.NewServer().ClientWithDatastore(context.Background(), tnet.RandIdentityOrFatal(t), dstore) - - resolver := NewIpnsResolver(d) - publisher := NewIpnsPublisher(d, dstore) - - identity := tnet.RandIdentityOrFatal(t) - - // Make a good record and put it in the datastore - h := path.FromString("/ipfs/QmZULkCELmmk5XNfCgTnCyFgAVxBRBXyDHGGMVoLFLiXEN") - eol := time.Now().Add(time.Hour) - entry, err := ipns.NewRecord(identity.PrivateKey(), h, 0, eol, 0) - if err != nil { - t.Fatal(err) - } - err = PutRecordToRouting(context.Background(), d, identity.PublicKey(), entry) - if err != nil { - t.Fatal(err) - } - - // Now, with an old record in the system already, try and publish a new one - err = publisher.Publish(context.Background(), identity.PrivateKey(), h) - if err != nil { - t.Fatal(err) - } - - err = verifyCanResolve(resolver, identity.ID().Pretty(), h) - if err != nil { - t.Fatal(err) - } -} - -func verifyCanResolve(r Resolver, name string, exp path.Path) error { - res, err := r.Resolve(context.Background(), name) - if err != nil { - return err - } - - if res != exp { - return errors.New("got back wrong 
record") - } - - return nil -} diff --git a/namesys/routing.go b/namesys/routing.go deleted file mode 100644 index 6b706bd92..000000000 --- a/namesys/routing.go +++ /dev/null @@ -1,147 +0,0 @@ -package namesys - -import ( - "context" - "strings" - "time" - - opts "github.com/ipfs/boxo/coreiface/options/namesys" - "github.com/ipfs/boxo/ipns" - "github.com/ipfs/boxo/path" - logging "github.com/ipfs/go-log/v2" - dht "github.com/libp2p/go-libp2p-kad-dht" - "github.com/libp2p/go-libp2p/core/peer" - "github.com/libp2p/go-libp2p/core/routing" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" -) - -var log = logging.Logger("namesys") - -// IpnsResolver implements NSResolver for the main IPFS SFS-like naming -type IpnsResolver struct { - routing routing.ValueStore -} - -// NewIpnsResolver constructs a name resolver using the IPFS Routing system -// to implement SFS-like naming on top. -func NewIpnsResolver(route routing.ValueStore) *IpnsResolver { - if route == nil { - panic("attempt to create resolver with nil routing system") - } - return &IpnsResolver{ - routing: route, - } -} - -// Resolve implements Resolver. -func (r *IpnsResolver) Resolve(ctx context.Context, name string, options ...opts.ResolveOpt) (path.Path, error) { - ctx, span := StartSpan(ctx, "IpnsResolver.Resolve", trace.WithAttributes(attribute.String("Name", name))) - defer span.End() - return resolve(ctx, r, name, opts.ProcessOpts(options)) -} - -// ResolveAsync implements Resolver. -func (r *IpnsResolver) ResolveAsync(ctx context.Context, name string, options ...opts.ResolveOpt) <-chan Result { - ctx, span := StartSpan(ctx, "IpnsResolver.ResolveAsync", trace.WithAttributes(attribute.String("Name", name))) - defer span.End() - return resolveAsync(ctx, r, name, opts.ProcessOpts(options)) -} - -// resolveOnce implements resolver. Uses the IPFS routing system to -// resolve SFS-like names. -func (r *IpnsResolver) resolveOnceAsync(ctx context.Context, name string, options opts.ResolveOpts) <-chan onceResult { - ctx, span := StartSpan(ctx, "IpnsResolver.ResolveOnceAsync", trace.WithAttributes(attribute.String("Name", name))) - defer span.End() - - out := make(chan onceResult, 1) - log.Debugf("RoutingResolver resolving %s", name) - cancel := func() {} - - if options.DhtTimeout != 0 { - // Resolution must complete within the timeout - ctx, cancel = context.WithTimeout(ctx, options.DhtTimeout) - } - - name = strings.TrimPrefix(name, "/ipns/") - - pid, err := peer.Decode(name) - if err != nil { - log.Debugf("RoutingResolver: could not convert public key hash %s to peer ID: %s\n", name, err) - out <- onceResult{err: err} - close(out) - cancel() - return out - } - - // Use the routing system to get the name. 
- // Note that the DHT will call the ipns validator when retrieving - // the value, which in turn verifies the ipns record signature - ipnsKey := string(ipns.NameFromPeer(pid).RoutingKey()) - - vals, err := r.routing.SearchValue(ctx, ipnsKey, dht.Quorum(int(options.DhtRecordCount))) - if err != nil { - log.Debugf("RoutingResolver: dht get for name %s failed: %s", name, err) - out <- onceResult{err: err} - close(out) - cancel() - return out - } - - go func() { - defer cancel() - defer close(out) - ctx, span := StartSpan(ctx, "IpnsResolver.ResolveOnceAsync.Worker") - defer span.End() - - for { - select { - case val, ok := <-vals: - if !ok { - return - } - - rec, err := ipns.UnmarshalRecord(val) - if err != nil { - log.Debugf("RoutingResolver: could not unmarshal value for name %s: %s", name, err) - emitOnceResult(ctx, out, onceResult{err: err}) - return - } - - p, err := rec.Value() - if err != nil { - emitOnceResult(ctx, out, onceResult{err: err}) - return - } - - ttl := DefaultResolverCacheTTL - if recordTTL, err := rec.TTL(); err == nil { - ttl = recordTTL - } - - switch eol, err := rec.Validity(); err { - case ipns.ErrUnrecognizedValidity: - // No EOL. - case nil: - ttEol := time.Until(eol) - if ttEol < 0 { - // It *was* valid when we first resolved it. - ttl = 0 - } else if ttEol < ttl { - ttl = ttEol - } - default: - log.Errorf("encountered error when parsing EOL: %s", err) - emitOnceResult(ctx, out, onceResult{err: err}) - return - } - - emitOnceResult(ctx, out, onceResult{value: path.Path(p.String()), ttl: ttl}) - case <-ctx.Done(): - return - } - } - }() - - return out -} diff --git a/namesys/tracing.go b/namesys/tracing.go deleted file mode 100644 index 4ef84294a..000000000 --- a/namesys/tracing.go +++ /dev/null @@ -1,13 +0,0 @@ -package namesys - -import ( - "context" - "fmt" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/trace" -) - -func StartSpan(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { - return otel.Tracer("go-namesys").Start(ctx, fmt.Sprintf("Namesys.%s", name)) -} diff --git a/namesys/utilities.go b/namesys/utilities.go new file mode 100644 index 000000000..cba7ee728 --- /dev/null +++ b/namesys/utilities.go @@ -0,0 +1,146 @@ +package namesys + +import ( + "context" + "fmt" + "strings" + + "github.com/ipfs/boxo/path" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/trace" +) + +type resolver interface { + resolveOnceAsync(context.Context, path.Path, ResolveOptions) <-chan AsyncResult +} + +// resolve is a helper for implementing Resolver.ResolveN using resolveOnce. 
+func resolve(ctx context.Context, r resolver, p path.Path, options ResolveOptions) (result Result, err error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + err = ErrResolveFailed + resCh := resolveAsync(ctx, r, p, options) + + for res := range resCh { + result.Path, result.TTL, result.LastMod, err = res.Path, res.TTL, res.LastMod, res.Err + if err != nil { + break + } + } + + return result, err +} + +func resolveAsync(ctx context.Context, r resolver, p path.Path, options ResolveOptions) <-chan AsyncResult { + ctx, span := startSpan(ctx, "ResolveAsync") + defer span.End() + + resCh := r.resolveOnceAsync(ctx, p, options) + depth := options.Depth + outCh := make(chan AsyncResult, 1) + + go func() { + defer close(outCh) + ctx, span := startSpan(ctx, "ResolveAsync.Worker") + defer span.End() + + var subCh <-chan AsyncResult + var cancelSub context.CancelFunc + defer func() { + if cancelSub != nil { + cancelSub() + } + }() + + for { + select { + case res, ok := <-resCh: + if !ok { + resCh = nil + break + } + + if res.Err != nil { + emitResult(ctx, outCh, res) + return + } + + log.Debugf("resolved %s to %s", p.String(), res.Path.String()) + + if !res.Path.Mutable() { + emitResult(ctx, outCh, res) + break + } + + if depth == 1 { + res.Err = ErrResolveRecursion + emitResult(ctx, outCh, res) + break + } + + subOpts := options + if subOpts.Depth > 1 { + subOpts.Depth-- + } + + var subCtx context.Context + if cancelSub != nil { + // Cancel previous recursive resolve since it won't be used anyways + cancelSub() + } + + subCtx, cancelSub = context.WithCancel(ctx) + _ = cancelSub + + subCh = resolveAsync(subCtx, r, res.Path, subOpts) + case res, ok := <-subCh: + if !ok { + subCh = nil + break + } + + // We don't bother returning here in case of context timeout as there is + // no good reason to do that, and we may still be able to emit a result + emitResult(ctx, outCh, res) + case <-ctx.Done(): + return + } + if resCh == nil && subCh == nil { + return + } + } + }() + return outCh +} + +func emitResult(ctx context.Context, outCh chan<- AsyncResult, r AsyncResult) { + select { + case outCh <- r: + case <-ctx.Done(): + } +} + +func joinPaths(resolvedBase, unresolvedPath path.Path) (path.Path, error) { + if resolvedBase == nil { + return nil, nil + } + + segments := unresolvedPath.Segments()[2:] + if strings.HasSuffix(unresolvedPath.String(), "/") { + segments = append(segments, "") + } + + // simple optimization + if len(segments) == 0 { + return resolvedBase, nil + } + + return path.Join(resolvedBase, segments...) 
+} + +var tracer = otel.Tracer("boxo/namesys") + +func startSpan(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + return tracer.Start(ctx, fmt.Sprintf("Namesys.%s", name)) +} diff --git a/path/error.go b/path/error.go index dafc446b5..0863c7cf4 100644 --- a/path/error.go +++ b/path/error.go @@ -1,25 +1,32 @@ package path import ( + "errors" "fmt" ) +var ( + ErrExpectedImmutable = errors.New("path was expected to be immutable") + ErrInsufficientComponents = errors.New("path does not have enough components") + ErrUnknownNamespace = errors.New("unknown namespace") +) + type ErrInvalidPath struct { - error error - path string + err error + path string } -func (e ErrInvalidPath) Error() string { - return fmt.Sprintf("invalid path %q: %s", e.path, e.error) +func (e *ErrInvalidPath) Error() string { + return fmt.Sprintf("invalid path %q: %s", e.path, e.err) } -func (e ErrInvalidPath) Unwrap() error { - return e.error +func (e *ErrInvalidPath) Unwrap() error { + return e.err } -func (e ErrInvalidPath) Is(err error) bool { +func (e *ErrInvalidPath) Is(err error) bool { switch err.(type) { - case ErrInvalidPath: + case *ErrInvalidPath: return true default: return false diff --git a/path/error_test.go b/path/error_test.go index 07aab6408..2b5f92945 100644 --- a/path/error_test.go +++ b/path/error_test.go @@ -6,11 +6,7 @@ import ( ) func TestErrorIs(t *testing.T) { - if !errors.Is(ErrInvalidPath{path: "foo", error: errors.New("bar")}, ErrInvalidPath{}) { - t.Fatal("error must be error") - } - - if !errors.Is(&ErrInvalidPath{path: "foo", error: errors.New("bar")}, ErrInvalidPath{}) { + if !errors.Is(&ErrInvalidPath{path: "foo", err: errors.New("bar")}, &ErrInvalidPath{}) { t.Fatal("pointer to error must be error") } } diff --git a/path/internal/tracing.go b/path/internal/tracing.go deleted file mode 100644 index f9eda2f92..000000000 --- a/path/internal/tracing.go +++ /dev/null @@ -1,13 +0,0 @@ -package internal - -import ( - "context" - "fmt" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/trace" -) - -func StartSpan(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { - return otel.Tracer("go-path").Start(ctx, fmt.Sprintf("Path.%s", name), opts...) -} diff --git a/path/path.go b/path/path.go index a9b36c3ce..8b67af4d0 100644 --- a/path/path.go +++ b/path/path.go @@ -3,187 +3,213 @@ package path import ( "fmt" - "path" + gopath "path" "strings" - cid "github.com/ipfs/go-cid" + "github.com/ipfs/go-cid" ) -// A Path represents an ipfs content path: -// - /path/to/file -// - /ipfs/ -// - /ipns//path/to/folder -// - etc -type Path string - -// ^^^ -// TODO: debate making this a private struct wrapped in a public interface -// would allow us to control creation, and cache segments. +const ( + IPFSNamespace = "ipfs" + IPNSNamespace = "ipns" + IPLDNamespace = "ipld" +) -// FromString safely converts a string type to a Path type. -func FromString(s string) Path { - return Path(s) +// Path is a generic, valid, and well-formed path. A valid path is shaped as follows: +// +// /{namespace}/{root}[/remaining/path] +// +// Where: +// +// 1. Namespace is "ipfs", "ipld", or "ipns". +// 2. If namespace is "ipfs" or "ipld", "root" must be a valid [cid.Cid]. +// 3. If namespace is "ipns", "root" may be a [ipns.Name] or a [DNSLink] FQDN. +// +// [DNSLink]: https://dnslink.dev/ +type Path interface { + // String returns the path as a string. + String() string + + // Namespace returns the first component of the path. 
For example, the namespace + // of "/ipfs/bafy" is "ipfs". + Namespace() string + + // Mutable returns false if the data under this path's namespace is guaranteed to not change. + Mutable() bool + + // Segments returns the different elements of a path delimited by a forward + // slash ("/"). The returned array must not contain any empty segments, and + // must have a length of at least two: the first element must be the namespace, + // and the second must be root. + // + // Examples: + // - "/ipld/bafkqaaa" returns ["ipld", "bafkqaaa"] + // - "/ipfs/bafkqaaa/a/b/" returns ["ipfs", "bafkqaaa", "a", "b"] + // - "/ipns/dnslink.net" returns ["ipns", "dnslink.net"] + Segments() []string } -// FromCid safely converts a cid.Cid type to a Path type. -func FromCid(c cid.Cid) Path { - return Path("/ipfs/" + c.String()) +var _ Path = path{} + +type path struct { + str string + namespace string } -// Segments returns the different elements of a path -// (elements are delimited by a /). -func (p Path) Segments() []string { - cleaned := path.Clean(string(p)) - segments := strings.Split(cleaned, "/") +func (p path) String() string { + return p.str +} - // Ignore leading slash - if len(segments[0]) == 0 { - segments = segments[1:] - } +func (p path) Namespace() string { + return p.namespace +} - return segments +func (p path) Mutable() bool { + return p.Namespace() != IPFSNamespace && p.Namespace() != IPLDNamespace } -// String converts a path to string. -func (p Path) String() string { - return string(p) +func (p path) Segments() []string { + return StringToSegments(p.str) } -// IsJustAKey returns true if the path is of the form or /ipfs/, or -// /ipld/ -func (p Path) IsJustAKey() bool { - parts := p.Segments() - return len(parts) == 2 && (parts[0] == "ipfs" || parts[0] == "ipld") +// ImmutablePath is a [Path] which is guaranteed to have an immutable [Namespace]. +type ImmutablePath struct { + path Path + rootCid cid.Cid } -// PopLastSegment returns a new Path without its final segment, and the final -// segment, separately. If there is no more to pop (the path is just a key), -// the original path is returned. -func (p Path) PopLastSegment() (Path, string, error) { - if p.IsJustAKey() { - return p, "", nil +var _ Path = ImmutablePath{} + +func NewImmutablePath(p Path) (ImmutablePath, error) { + if p.Mutable() { + return ImmutablePath{}, &ErrInvalidPath{err: ErrExpectedImmutable, path: p.String()} } - segs := p.Segments() - newPath, err := ParsePath("/" + strings.Join(segs[:len(segs)-1], "/")) + segments := p.Segments() + cid, err := cid.Decode(segments[1]) if err != nil { - return "", "", err + return ImmutablePath{}, &ErrInvalidPath{err: err, path: p.String()} } - return newPath, segs[len(segs)-1], nil + return ImmutablePath{path: p, rootCid: cid}, nil } -// FromSegments returns a path given its different segments. -func FromSegments(prefix string, seg ...string) (Path, error) { - return ParsePath(prefix + strings.Join(seg, "/")) +func (ip ImmutablePath) String() string { + return ip.path.String() } -// ParsePath returns a well-formed ipfs Path. -// The returned path will always be prefixed with /ipfs/ or /ipns/. -// The prefix will be added if not present in the given string. -// This function will return an error when the given string is -// not a valid ipfs path. 
-func ParsePath(txt string) (Path, error) { - parts := strings.Split(txt, "/") - if len(parts) == 1 { - kp, err := ParseCidToPath(txt) - if err == nil { - return kp, nil - } - } +func (ip ImmutablePath) Namespace() string { + return ip.path.Namespace() +} - // if the path doesnt begin with a '/' - // we expect this to start with a hash, and be an 'ipfs' path - if parts[0] != "" { - if _, err := decodeCid(parts[0]); err != nil { - return "", &ErrInvalidPath{error: err, path: txt} - } - // The case when the path starts with hash without a protocol prefix - return Path("/ipfs/" + txt), nil - } +func (ip ImmutablePath) Mutable() bool { + return false +} - if len(parts) < 3 { - return "", &ErrInvalidPath{error: fmt.Errorf("invalid ipfs path"), path: txt} - } +func (ip ImmutablePath) Segments() []string { + return ip.path.Segments() +} - // TODO: make this smarter - switch parts[1] { - case "ipfs", "ipld": - if parts[2] == "" { - return "", &ErrInvalidPath{error: fmt.Errorf("not enough path components"), path: txt} - } - // Validate Cid. - _, err := decodeCid(parts[2]) - if err != nil { - return "", &ErrInvalidPath{error: fmt.Errorf("invalid CID: %w", err), path: txt} - } - case "ipns": - if parts[2] == "" { - return "", &ErrInvalidPath{error: fmt.Errorf("not enough path components"), path: txt} - } - default: - return "", &ErrInvalidPath{error: fmt.Errorf("unknown namespace %q", parts[1]), path: txt} - } +func (ip ImmutablePath) RootCid() cid.Cid { + return ip.rootCid +} - return Path(txt), nil +// FromCid returns a new "/ipfs" path with the provided CID. +func FromCid(cid cid.Cid) ImmutablePath { + return ImmutablePath{ + path: path{ + str: fmt.Sprintf("/%s/%s", IPFSNamespace, cid.String()), + namespace: IPFSNamespace, + }, + rootCid: cid, + } } -// ParseCidToPath takes a CID in string form and returns a valid ipfs Path. -func ParseCidToPath(txt string) (Path, error) { - if txt == "" { - return "", &ErrInvalidPath{error: fmt.Errorf("empty"), path: txt} +// NewPath takes the given string and returns a well-formed and sanitized [Path]. +// The given string is cleaned through [gopath.Clean], but preserving the final +// trailing slash. This function returns an error when the given string is not +// a valid content path. +func NewPath(str string) (Path, error) { + segments := StringToSegments(str) + + // Shortest valid path is "/{namespace}/{root}". That yields at least two + // segments: ["{namespace}" "{root}"]. Therefore, here we check if the original + // string begins with "/" (any path must), if we have at least two segments, and if + // the root is non-empty. The namespace is checked further below. + if !strings.HasPrefix(str, "/") || len(segments) < 2 || segments[1] == "" { + return nil, &ErrInvalidPath{err: ErrInsufficientComponents, path: str} } - c, err := decodeCid(txt) - if err != nil { - return "", &ErrInvalidPath{error: err, path: txt} + cleaned := SegmentsToString(segments...) + if strings.HasSuffix(str, "/") { + // Do not forget to preserve the trailing slash! + cleaned += "/" } - return FromCid(c), nil -} + switch segments[0] { + case IPFSNamespace, IPLDNamespace: + cid, err := cid.Decode(segments[1]) + if err != nil { + return nil, &ErrInvalidPath{err: err, path: str} + } -// IsValid checks if a path is a valid ipfs Path. 
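
Because `path.Path` changes from a string alias to an interface with a dedicated `ImmutablePath`, here is a small usage sketch (illustrative only, built from the constructors and accessors introduced in this diff); the printed values follow the expectations encoded in the new tests.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/ipfs/boxo/path"
	"github.com/ipfs/go-cid"
)

func main() {
	// NewPath validates and cleans the input; it returns an ImmutablePath
	// for /ipfs and /ipld, and a mutable path for /ipns.
	p, err := path.NewPath("/ipfs/bafkqaaa/a/b/../c")
	if err != nil {
		panic(err)
	}
	fmt.Println(p.String())    // "/ipfs/bafkqaaa/a/c"
	fmt.Println(p.Namespace()) // "ipfs"
	fmt.Println(p.Mutable())   // false
	fmt.Println(p.Segments())  // [ipfs bafkqaaa a c]

	// Every parse failure matches *ErrInvalidPath, with a more specific
	// sentinel error underneath.
	if _, err := path.NewPath("/ipfs/"); err != nil {
		fmt.Println(errors.Is(err, &path.ErrInvalidPath{}))         // true
		fmt.Println(errors.Is(err, path.ErrInsufficientComponents)) // true
	}

	// Immutable paths can also be built directly from a CID.
	c, _ := cid.Decode("bafkqaaa")
	imm := path.FromCid(c)
	fmt.Println(imm.RootCid().Equals(c)) // true

	// Join appends segments and re-validates the result.
	joined, _ := path.Join(imm, "a", "b")
	fmt.Println(joined.String()) // "/ipfs/bafkqaaa/a/b"
}
```
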
-func (p *Path) IsValid() error { - _, err := ParsePath(p.String()) - return err + return ImmutablePath{ + path: path{ + str: cleaned, + namespace: segments[0], + }, + rootCid: cid, + }, nil + case "ipns": + return path{ + str: cleaned, + namespace: segments[0], + }, nil + default: + return nil, &ErrInvalidPath{err: fmt.Errorf("%w: %q", ErrUnknownNamespace, segments[0]), path: str} + } } -// Join joins strings slices using / -func Join(pths []string) string { - return strings.Join(pths, "/") +// NewPathFromSegments creates a new [Path] from the provided segments. This +// function simply calls [NewPath] internally with the segments concatenated +// using a forward slash "/" as separator. Please see [Path.Segments] for more +// information about how segments must be structured. +func NewPathFromSegments(segments ...string) (Path, error) { + return NewPath(SegmentsToString(segments...)) } -// SplitList splits strings usings / -func SplitList(pth string) []string { - return strings.Split(pth, "/") +// Join joins a [Path] with certain segments and returns a new [Path]. +func Join(p Path, segments ...string) (Path, error) { + s := p.Segments() + s = append(s, segments...) + return NewPathFromSegments(s...) } -// SplitAbsPath clean up and split fpath. It extracts the first component (which -// must be a Multihash) and return it separately. -func SplitAbsPath(fpath Path) (cid.Cid, []string, error) { - parts := fpath.Segments() - if parts[0] == "ipfs" || parts[0] == "ipld" { - parts = parts[1:] +// SegmentsToString converts an array of segments into a string. The returned string +// will always be prefixed with a "/" if there are any segments. For example, if the +// given segments array is ["foo", "bar"], the returned value will be "/foo/bar". +// Given an empty array, an empty string is returned. +func SegmentsToString(segments ...string) string { + str := strings.Join(segments, "/") + if str != "" { + str = "/" + str } - - // if nothing, bail. - if len(parts) == 0 { - return cid.Cid{}, nil, &ErrInvalidPath{error: fmt.Errorf("empty"), path: string(fpath)} - } - - c, err := decodeCid(parts[0]) - // first element in the path is a cid - if err != nil { - return cid.Cid{}, nil, &ErrInvalidPath{error: fmt.Errorf("invalid CID: %w", err), path: string(fpath)} - } - - return c, parts[1:], nil + return str } -func decodeCid(cstr string) (cid.Cid, error) { - c, err := cid.Decode(cstr) - if err != nil && len(cstr) == 46 && cstr[:2] == "qm" { // https://github.com/ipfs/go-ipfs/issues/7792 - return cid.Cid{}, fmt.Errorf("%v (possible lowercased CIDv0; consider converting to a case-agnostic CIDv1, such as base32)", err) +// StringToSegments converts a string into an array of segments. This function follows +// the rules of [Path.Segments]: the path is first cleaned through [gopath.Clean] and +// no empty segments are returned. +func StringToSegments(str string) []string { + str = gopath.Clean(str) + if str == "." { + return nil + } + // Trim slashes from beginning and end, such that we do not return empty segments. 
+ str = strings.TrimSuffix(str, "/") + str = strings.TrimPrefix(str, "/") + if str == "" { + return nil } - return c, err + return strings.Split(str, "/") } diff --git a/path/path_test.go b/path/path_test.go index 2b26a5678..b432f11a9 100644 --- a/path/path_test.go +++ b/path/path_test.go @@ -1,128 +1,305 @@ package path import ( - "strings" + "fmt" "testing" + + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/assert" ) -func TestPathParsing(t *testing.T) { - cases := map[string]bool{ - "/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n": true, - "/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a": true, - "/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a/b/c/d/e/f": true, - "/ipld/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n": true, - "/ipld/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a": true, - "/ipld/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a/b/c/d/e/f": true, - "/ipns/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a/b/c/d/e/f": true, - "/ipns/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n": true, - "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a/b/c/d/e/f": true, - "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n": true, - "/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n": false, - "/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a": false, - "/ipfs/foo": false, - "/ipfs/": false, - "ipfs/": false, - "ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n": false, - "/ipld/foo": false, - "/ipld/": false, - "ipld/": false, - "ipld/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n": false, +func newIPLDPath(cid cid.Cid) ImmutablePath { + return ImmutablePath{ + path: path{ + str: fmt.Sprintf("/%s/%s", IPLDNamespace, cid.String()), + namespace: IPLDNamespace, + }, + rootCid: cid, } +} + +func TestNewPath(t *testing.T) { + t.Parallel() + + t.Run("Valid Paths", func(t *testing.T) { + t.Parallel() + + testCases := []struct { + src string + canonical string + namespace string + mutable bool + }{ + // IPFS CIDv0 + {"/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", "/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", IPFSNamespace, false}, + {"/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a", "/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a", IPFSNamespace, false}, + {"/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a/b/c/d/e/f", "/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a/b/c/d/e/f", IPFSNamespace, false}, + + // IPFS CIDv1 + {"/ipfs/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", "/ipfs/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", IPFSNamespace, false}, + {"/ipfs/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/a", "/ipfs/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/a", IPFSNamespace, false}, + {"/ipfs/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/a/b/c/d/e/f", "/ipfs/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/a/b/c/d/e/f", IPFSNamespace, false}, + + // IPLD CIDv0 + {"/ipld/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", "/ipld/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", IPLDNamespace, false}, + {"/ipld/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a", "/ipld/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a", IPLDNamespace, false}, + {"/ipld/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a/b/c/d/e/f", "/ipld/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a/b/c/d/e/f", IPLDNamespace, false}, - for p, expected := range cases { - _, err := ParsePath(p) - valid := err == nil - if valid != expected { - t.Fatalf("expected %s to have 
valid == %t", p, expected) + // IPLD CIDv1 + {"/ipld/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", "/ipld/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", IPLDNamespace, false}, + {"/ipld/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/a", "/ipld/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/a", IPLDNamespace, false}, + {"/ipld/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/a/b/c/d/e/f", "/ipld/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/a/b/c/d/e/f", IPLDNamespace, false}, + + // IPNS CIDv0 + {"/ipns/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", "/ipns/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", IPNSNamespace, true}, + {"/ipns/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a", "/ipns/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a", IPNSNamespace, true}, + {"/ipns/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a/b/c/d/e/f", "/ipns/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a/b/c/d/e/f", IPNSNamespace, true}, + + // IPNS CIDv1 + {"/ipns/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", "/ipns/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", IPNSNamespace, true}, + {"/ipns/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/a", "/ipns/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/a", IPNSNamespace, true}, + {"/ipns/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/a/b/c/d/e/f", "/ipns/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/a/b/c/d/e/f", IPNSNamespace, true}, + + // IPNS DNSLink + {"/ipns/domain.net", "/ipns/domain.net", IPNSNamespace, true}, + {"/ipns/domain.net/a/b/c/d", "/ipns/domain.net/a/b/c/d", IPNSNamespace, true}, + + // Cleaning checks + {"/ipfs/bafkqaaa/", "/ipfs/bafkqaaa/", IPFSNamespace, false}, + {"/ipfs/bafkqaaa//", "/ipfs/bafkqaaa/", IPFSNamespace, false}, + {"/ipfs///bafkqaaa//", "/ipfs/bafkqaaa/", IPFSNamespace, false}, + {"/ipfs///bafkqaaa/a/b/../c", "/ipfs/bafkqaaa/a/c", IPFSNamespace, false}, + {"/ipfs///bafkqaaa/a/b/../c/", "/ipfs/bafkqaaa/a/c/", IPFSNamespace, false}, } - } -} -func TestNoComponents(t *testing.T) { - for _, s := range []string{ - "/ipfs/", - "/ipns/", - "/ipld/", - } { - _, err := ParsePath(s) - if err == nil || !strings.Contains(err.Error(), "not enough path components") || !strings.Contains(err.Error(), s) { - t.Error("wrong error") + for _, testCase := range testCases { + p, err := NewPath(testCase.src) + assert.NoError(t, err) + assert.Equal(t, testCase.canonical, p.String()) + assert.Equal(t, testCase.namespace, p.Namespace()) + assert.Equal(t, testCase.mutable, p.Mutable()) } - } -} + }) + + t.Run("Invalid Paths", func(t *testing.T) { + t.Parallel() -func TestInvalidPaths(t *testing.T) { - for _, s := range []string{ - "/ipfs", - "/testfs", - "/", - } { - _, err := ParsePath(s) - if err == nil || !strings.Contains(err.Error(), "invalid ipfs path") || !strings.Contains(err.Error(), s) { - t.Error("wrong error") + testCases := []struct { + src string + err error + }{ + {"QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", ErrInsufficientComponents}, + {"QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a", ErrInsufficientComponents}, + {"bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/a", ErrInsufficientComponents}, + {"/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", ErrInsufficientComponents}, + {"/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a", ErrUnknownNamespace}, + {"/ipfs/foo", cid.ErrInvalidCid{}}, + {"/ipfs/", ErrInsufficientComponents}, + {"ipfs/", 
ErrInsufficientComponents}, + {"ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", ErrInsufficientComponents}, + {"/ipld/foo", &ErrInvalidPath{}}, + {"/ipld/", ErrInsufficientComponents}, + {"ipld/", ErrInsufficientComponents}, + {"ipld/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", ErrInsufficientComponents}, + {"/ipns", ErrInsufficientComponents}, + {"/ipfs/", ErrInsufficientComponents}, + {"/ipns/", ErrInsufficientComponents}, + {"/ipld/", ErrInsufficientComponents}, + {"/ipfs", ErrInsufficientComponents}, + {"/testfs", ErrInsufficientComponents}, + {"/", ErrInsufficientComponents}, } - } -} -func TestIsJustAKey(t *testing.T) { - cases := map[string]bool{ - "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n": true, - "/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n": true, - "/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a": false, - "/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a/b": false, - "/ipns/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n": false, - "/ipld/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a/b": false, - "/ipld/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n": true, - } + for _, testCase := range testCases { + _, err := NewPath(testCase.src) + assert.ErrorIs(t, err, testCase.err) + assert.ErrorIs(t, err, &ErrInvalidPath{}) // Always an ErrInvalidPath! + } + }) + + t.Run("Returns ImmutablePath for IPFS and IPLD Paths", func(t *testing.T) { + t.Parallel() - for p, expected := range cases { - path, err := ParsePath(p) - if err != nil { - t.Fatalf("ParsePath failed to parse \"%s\", but should have succeeded", p) + testCases := []struct { + src string + }{ + {"/ipfs/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"}, + {"/ipfs/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/a"}, + {"/ipfs/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/a/b/c/d/e/f"}, + {"/ipld/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"}, + {"/ipld/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/a"}, + {"/ipld/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/a/b/c/d/e/f"}, } - result := path.IsJustAKey() - if result != expected { - t.Fatalf("expected IsJustAKey(%s) to return %v, not %v", p, expected, result) + + for _, testCase := range testCases { + p, err := NewPath(testCase.src) + assert.NoError(t, err) + assert.IsType(t, ImmutablePath{}, p) } - } + }) } -func TestPopLastSegment(t *testing.T) { - cases := map[string][]string{ - "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n": {"/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", ""}, - "/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n": {"/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", ""}, - "/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a": {"/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", "a"}, - "/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a/b": {"/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a", "b"}, - "/ipns/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/x/y/z": {"/ipns/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/x/y", "z"}, - "/ipld/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/x/y/z": {"/ipld/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/x/y", "z"}, - } +func TestFromCid(t *testing.T) { + t.Parallel() - for p, expected := range cases { - path, err := ParsePath(p) - if err != nil { - t.Fatalf("ParsePath failed to parse \"%s\", but should have succeeded", p) - } - head, tail, err := path.PopLastSegment() - if err != nil { - t.Fatalf("PopLastSegment failed, but should have succeeded: %s", err) + 
t.Run("Works with CIDv0", func(t *testing.T) { + t.Parallel() + + c, err := cid.Decode("QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n") + assert.NoError(t, err) + + p := FromCid(c) + assert.IsType(t, ImmutablePath{}, p) + assert.Equal(t, "/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", p.String()) + assert.Equal(t, c, p.RootCid()) + }) + + t.Run("Works with CIDv1", func(t *testing.T) { + t.Parallel() + + c, err := cid.Decode("bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku") + assert.NoError(t, err) + + p := FromCid(c) + assert.IsType(t, ImmutablePath{}, p) + assert.Equal(t, "/ipfs/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", p.String()) + assert.Equal(t, c, p.RootCid()) + }) + + t.Run("newIPLDPath returns correct ImmutablePath", func(t *testing.T) { + c, err := cid.Decode("QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n") + assert.NoError(t, err) + + p := newIPLDPath(c) + assert.IsType(t, ImmutablePath{}, p) + assert.Equal(t, "/ipld/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", p.String()) + assert.Equal(t, c, p.RootCid()) + + // Check if CID encoding is preserved. + c, err = cid.Decode("bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku") + assert.NoError(t, err) + + p = newIPLDPath(c) + assert.IsType(t, ImmutablePath{}, p) + assert.Equal(t, "/ipld/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", p.String()) + assert.Equal(t, c, p.RootCid()) + }) +} + +func TestNewImmutablePath(t *testing.T) { + t.Parallel() + + t.Run("Fails on Mutable Path", func(t *testing.T) { + for _, path := range []string{ + "/ipns/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", + "/ipns/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", + "/ipns/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/with/path", + "/ipns/domain.net", + } { + p, err := NewPath(path) + assert.NoError(t, err) + + _, err = NewImmutablePath(p) + assert.ErrorIs(t, err, ErrExpectedImmutable) + assert.ErrorIs(t, err, &ErrInvalidPath{}) } - headStr := head.String() - if headStr != expected[0] { - t.Fatalf("expected head of PopLastSegment(%s) to return %v, not %v", p, expected[0], headStr) + }) + + t.Run("Succeeds on Immutable Path", func(t *testing.T) { + testCases := []struct { + path string + cid cid.Cid + }{ + {"/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", cid.MustParse("QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n")}, + {"/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a/b", cid.MustParse("QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n")}, + {"/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a/b/", cid.MustParse("QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n")}, + + {"/ipfs/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", cid.MustParse("bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku")}, + {"/ipfs/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/a/b", cid.MustParse("bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku")}, + {"/ipfs/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/a/b/", cid.MustParse("bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku")}, + + {"/ipld/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", cid.MustParse("bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku")}, + {"/ipld/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/a/b", cid.MustParse("bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku")}, + {"/ipld/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/a/b/", 
cid.MustParse("bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku")}, } - if tail != expected[1] { - t.Fatalf("expected tail of PopLastSegment(%s) to return %v, not %v", p, expected[1], tail) + + for _, testCase := range testCases { + p, err := NewPath(testCase.path) + assert.NoError(t, err) + + ip, err := NewImmutablePath(p) + assert.NoError(t, err) + assert.Equal(t, testCase.path, ip.String()) + assert.Equal(t, testCase.cid, ip.RootCid()) } + }) +} + +func TestJoin(t *testing.T) { + t.Parallel() + + testCases := []struct { + path string + segments []string + expected string + }{ + {"/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", []string{"a/b"}, "/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a/b"}, + {"/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", []string{"/a/b"}, "/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a/b"}, + {"/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/", []string{"/a/b"}, "/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a/b"}, + {"/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", []string{"a", "b"}, "/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a/b"}, + {"/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", []string{"a/b/../"}, "/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a/"}, + {"/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", []string{"a/b", "/"}, "/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/a/b/"}, + + {"/ipfs/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", []string{"a/b"}, "/ipfs/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/a/b"}, + {"/ipfs/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", []string{"/a/b"}, "/ipfs/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/a/b"}, + {"/ipfs/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/", []string{"/a/b"}, "/ipfs/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/a/b"}, + {"/ipfs/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", []string{"a", "b"}, "/ipfs/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/a/b"}, + {"/ipfs/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", []string{"a/b/../"}, "/ipfs/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/a/"}, + {"/ipfs/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", []string{"a/b", "/"}, "/ipfs/bafybeihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku/a/b/"}, + } + + for _, testCase := range testCases { + p, err := NewPath(testCase.path) + assert.NoError(t, err) + jp, err := Join(p, testCase.segments...) 
+ assert.NoError(t, err) + assert.Equal(t, testCase.expected, jp.String()) } } -func TestV0ErrorDueToLowercase(t *testing.T) { - badb58 := "/ipfs/qmbwqxbekc3p8tqskc98xmwnzrzdtrlmimpl8wbutgsmnr" - _, err := ParsePath(badb58) - if err == nil { - t.Fatal("should have failed to decode") +func TestStringToSegments(t *testing.T) { + testCases := []struct { + str string + expected []string + }{ + {"", nil}, + {"/..", nil}, + {"/a/b/c/d/./../../../../../..", nil}, + {"/a/b/c/d/./../../../", []string{"a"}}, + {"/a/b//c/d/./../../", []string{"a", "b"}}, + {"/a/b/////c/../d///f", []string{"a", "b", "d", "f"}}, } - if !strings.HasSuffix(err.Error(), "(possible lowercased CIDv0; consider converting to a case-agnostic CIDv1, such as base32)") { - t.Fatal("should have meaningful info about case-insensitive fix") + + for _, testCase := range testCases { + segments := StringToSegments(testCase.str) + assert.Equal(t, testCase.expected, segments) + } +} + +func TestSegmentsToString(t *testing.T) { + testCases := []struct { + segments []string + expected string + }{ + {[]string{"a", "b"}, "/a/b"}, + {[]string{"a", "b", "d", "f"}, "/a/b/d/f"}, + {[]string{""}, ""}, + {[]string{}, ""}, + {nil, ""}, + } + + for _, testCase := range testCases { + str := SegmentsToString(testCase.segments...) + assert.Equal(t, testCase.expected, str) } } diff --git a/path/resolver/resolver.go b/path/resolver/resolver.go index f666d4b79..e7cc25f92 100644 --- a/path/resolver/resolver.go +++ b/path/resolver/resolver.go @@ -3,19 +3,17 @@ package resolver import ( "context" - "errors" "fmt" "time" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" "github.com/ipfs/boxo/fetcher" fetcherhelpers "github.com/ipfs/boxo/fetcher/helpers" - path "github.com/ipfs/boxo/path" - "github.com/ipfs/boxo/path/internal" + "github.com/ipfs/boxo/path" cid "github.com/ipfs/go-cid" - format "github.com/ipfs/go-ipld-format" logging "github.com/ipfs/go-log/v2" "github.com/ipld/go-ipld-prime" cidlink "github.com/ipld/go-ipld-prime/linking/cid" @@ -24,76 +22,73 @@ import ( "github.com/ipld/go-ipld-prime/traversal/selector/builder" ) -var log = logging.Logger("pathresolv") +var log = logging.Logger("path/resolver") -// ErrNoComponents is used when Paths after a protocol -// do not contain at least one component -var ErrNoComponents = errors.New( - "path must contain at least one component") - -// ErrNoLink is returned when a link is not found in a path +// ErrNoLink is returned when a link is not found in a path. type ErrNoLink struct { Name string Node cid.Cid } -// Error implements the Error interface for ErrNoLink with a useful -// human readable message. -func (e ErrNoLink) Error() string { +// Error implements the [errors.Error] interface. +func (e *ErrNoLink) Error() string { return fmt.Sprintf("no link named %q under %s", e.Name, e.Node.String()) } +// Is implements [errors.Is] interface. +func (e *ErrNoLink) Is(err error) bool { + switch err.(type) { + case *ErrNoLink: + return true + default: + return false + } +} + // Resolver provides path resolution to IPFS. type Resolver interface { - // ResolveToLastNode walks the given path and returns the cid of the - // last block referenced by the path, and the path segments to - // traverse from the final block boundary to the final node within the - // block. - ResolveToLastNode(ctx context.Context, fpath path.Path) (cid.Cid, []string, error) - // ResolvePath fetches the node for given path. 
It returns the last - // item returned by ResolvePathComponents and the last link traversed - // which can be used to recover the block. - ResolvePath(ctx context.Context, fpath path.Path) (ipld.Node, ipld.Link, error) - // ResolvePathComponents fetches the nodes for each segment of the given path. - // It uses the first path component as a hash (key) of the first node, then - // resolves all other components walking the links via a selector traversal - ResolvePathComponents(ctx context.Context, fpath path.Path) ([]ipld.Node, error) + // ResolveToLastNode walks the given path and returns the CID of the last block + // referenced by the path, as well as the remainder of the path segments to traverse + // from the final block boundary to the final node within the block. + ResolveToLastNode(context.Context, path.ImmutablePath) (cid.Cid, []string, error) + + // ResolvePath fetches the node for the given path. It returns the last item returned + // by [Resolver.ResolvePathComponents] and the last link traversed which can be used + // to recover the block. + ResolvePath(context.Context, path.ImmutablePath) (ipld.Node, ipld.Link, error) + + // ResolvePathComponents fetches the nodes for each segment of the given path. It + // uses the first path component as the CID of the first node, then resolves all + // other components walking the links via a selector traversal. + ResolvePathComponents(context.Context, path.ImmutablePath) ([]ipld.Node, error) } -// basicResolver implements the Resolver interface. -// It references a FetcherFactory, which is uses to resolve nodes. -// TODO: now that this is more modular, try to unify this code with the -// -// the resolvers in namesys. +// basicResolver implements the [Resolver] interface. It requires a [fetcher.Factory], +// which is used to resolve the nodes. type basicResolver struct { FetcherFactory fetcher.Factory } -// NewBasicResolver constructs a new basic resolver. -func NewBasicResolver(fetcherFactory fetcher.Factory) Resolver { +// NewBasicResolver constructs a new basic resolver using the given [fetcher.Factory]. +func NewBasicResolver(factory fetcher.Factory) Resolver { return &basicResolver{ - FetcherFactory: fetcherFactory, + FetcherFactory: factory, } } -// ResolveToLastNode walks the given path and returns the cid of the last -// block referenced by the path, and the path segments to traverse from the -// final block boundary to the final node within the block. -func (r *basicResolver) ResolveToLastNode(ctx context.Context, fpath path.Path) (cid.Cid, []string, error) { - ctx, span := internal.StartSpan(ctx, "basicResolver.ResolveToLastNode", trace.WithAttributes(attribute.Stringer("Path", fpath))) +// ResolveToLastNode implements [Resolver.ResolveToLastNode]. 
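// Illustrative call, given as an editorial sketch rather than as part of this
// patch; fetcherFactory and ctx are assumed to be in scope, the CIDv0 is the
// one used in the tests, and "/dir/file" is a made-up suffix:
//
//	res := resolver.NewBasicResolver(fetcherFactory)
//	p, _ := path.NewPath("/ipfs/QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n/dir/file")
//	imPath, _ := path.NewImmutablePath(p)
//	lastCid, remainder, err := res.ResolveToLastNode(ctx, imPath)
//	// remainder holds the segments that still have to be walked inside the last block.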
+func (r *basicResolver) ResolveToLastNode(ctx context.Context, fpath path.ImmutablePath) (cid.Cid, []string, error) { + ctx, span := startSpan(ctx, "basicResolver.ResolveToLastNode", trace.WithAttributes(attribute.Stringer("Path", fpath))) defer span.End() - c, p, err := path.SplitAbsPath(fpath) - if err != nil { - return cid.Cid{}, nil, err - } + c, remainder := fpath.RootCid(), fpath.Segments()[2:] - if len(p) == 0 { + if len(remainder) == 0 { return c, nil, nil } // create a selector to traverse and match all path segments - pathSelector := pathAllSelector(p[:len(p)-1]) + pathSelector := pathAllSelector(remainder[:len(remainder)-1]) // create a new cancellable session ctx, cancel := context.WithTimeout(ctx, time.Minute) @@ -107,19 +102,19 @@ func (r *basicResolver) ResolveToLastNode(ctx context.Context, fpath path.Path) if len(nodes) < 1 { return cid.Cid{}, nil, fmt.Errorf("path %v did not resolve to a node", fpath) - } else if len(nodes) < len(p) { - return cid.Undef, nil, ErrNoLink{Name: p[len(nodes)-1], Node: lastCid} + } else if len(nodes) < len(remainder) { + return cid.Undef, nil, &ErrNoLink{Name: remainder[len(nodes)-1], Node: lastCid} } parent := nodes[len(nodes)-1] - lastSegment := p[len(p)-1] + lastSegment := remainder[len(remainder)-1] // find final path segment within node nd, err := parent.LookupBySegment(ipld.ParsePathSegment(lastSegment)) switch err.(type) { case nil: case schema.ErrNoSuchField: - return cid.Undef, nil, ErrNoLink{Name: lastSegment, Node: lastCid} + return cid.Undef, nil, &ErrNoLink{Name: lastSegment, Node: lastCid} default: return cid.Cid{}, nil, err } @@ -127,7 +122,7 @@ func (r *basicResolver) ResolveToLastNode(ctx context.Context, fpath path.Path) // if last node is not a link, just return it's cid, add path to remainder and return if nd.Kind() != ipld.Kind_Link { // return the cid and the remainder of the path - return lastCid, p[len(p)-depth-1:], nil + return lastCid, remainder[len(remainder)-depth-1:], nil } lnk, err := nd.AsLink() @@ -143,27 +138,18 @@ func (r *basicResolver) ResolveToLastNode(ctx context.Context, fpath path.Path) return clnk.Cid, []string{}, nil } -// ResolvePath fetches the node for given path. It returns the last item -// returned by ResolvePathComponents and the last link traversed which can be used to recover the block. +// ResolvePath implements [Resolver.ResolvePath]. // -// Note: if/when the context is cancelled or expires then if a multi-block ADL node is returned then it may not be -// possible to load certain values. -func (r *basicResolver) ResolvePath(ctx context.Context, fpath path.Path) (ipld.Node, ipld.Link, error) { - ctx, span := internal.StartSpan(ctx, "basicResolver.ResolvePath", trace.WithAttributes(attribute.Stringer("Path", fpath))) +// Note: if/when the context is cancelled or expires then if a multi-block ADL +// node is returned then it may not be possible to load certain values. 
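// Editorial note (not part of this patch): unlike ResolveToLastNode, which only
// reports the CID of the last block plus any in-block remainder, ResolvePath
// loads and returns the final node itself together with the link that was
// traversed to reach it.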
+func (r *basicResolver) ResolvePath(ctx context.Context, fpath path.ImmutablePath) (ipld.Node, ipld.Link, error) { + ctx, span := startSpan(ctx, "basicResolver.ResolvePath", trace.WithAttributes(attribute.Stringer("Path", fpath))) defer span.End() - // validate path - if err := fpath.IsValid(); err != nil { - return nil, nil, err - } - - c, p, err := path.SplitAbsPath(fpath) - if err != nil { - return nil, nil, err - } + c, remainder := fpath.RootCid(), fpath.Segments()[2:] // create a selector to traverse all path segments but only match the last - pathSelector := pathLeafSelector(p) + pathSelector := pathLeafSelector(remainder) nodes, c, _, err := r.resolveNodes(ctx, c, pathSelector) if err != nil { @@ -175,81 +161,29 @@ func (r *basicResolver) ResolvePath(ctx context.Context, fpath path.Path) (ipld. return nodes[len(nodes)-1], cidlink.Link{Cid: c}, nil } -// ResolveSingle simply resolves one hop of a path through a graph with no -// extra context (does not opaquely resolve through sharded nodes) -// Deprecated: fetch node as ipld-prime or convert it and then use a selector to traverse through it. -func ResolveSingle(ctx context.Context, ds format.NodeGetter, nd format.Node, names []string) (*format.Link, []string, error) { - _, span := internal.StartSpan(ctx, "ResolveSingle", trace.WithAttributes(attribute.Stringer("CID", nd.Cid()))) - defer span.End() - return nd.ResolveLink(names) -} - -// ResolvePathComponents fetches the nodes for each segment of the given path. -// It uses the first path component as a hash (key) of the first node, then -// resolves all other components walking the links via a selector traversal +// ResolvePathComponents implements [Resolver.ResolvePathComponents]. // -// Note: if/when the context is cancelled or expires then if a multi-block ADL node is returned then it may not be -// possible to load certain values. -func (r *basicResolver) ResolvePathComponents(ctx context.Context, fpath path.Path) (nodes []ipld.Node, err error) { - ctx, span := internal.StartSpan(ctx, "basicResolver.ResolvePathComponents", trace.WithAttributes(attribute.Stringer("Path", fpath))) +// Note: if/when the context is cancelled or expires then if a multi-block ADL +// node is returned then it may not be possible to load certain values. +func (r *basicResolver) ResolvePathComponents(ctx context.Context, fpath path.ImmutablePath) (nodes []ipld.Node, err error) { + ctx, span := startSpan(ctx, "basicResolver.ResolvePathComponents", trace.WithAttributes(attribute.Stringer("Path", fpath))) defer span.End() defer log.Debugw("resolvePathComponents", "fpath", fpath, "error", err) - // validate path - if err := fpath.IsValid(); err != nil { - return nil, err - } - - c, p, err := path.SplitAbsPath(fpath) - if err != nil { - return nil, err - } + c, remainder := fpath.RootCid(), fpath.Segments()[2:] // create a selector to traverse and match all path segments - pathSelector := pathAllSelector(p) + pathSelector := pathAllSelector(remainder) nodes, _, _, err = r.resolveNodes(ctx, c, pathSelector) return nodes, err } -// ResolveLinks iteratively resolves names by walking the link hierarchy. -// Every node is fetched from the Fetcher, resolving the next name. -// Returns the list of nodes forming the path, starting with ndd. This list is -// guaranteed never to be empty. 
-// -// ResolveLinks(nd, []string{"foo", "bar", "baz"}) -// would retrieve "baz" in ("bar" in ("foo" in nd.Links).Links).Links -// -// Note: if/when the context is cancelled or expires then if a multi-block ADL node is returned then it may not be -// possible to load certain values. -func (r *basicResolver) ResolveLinks(ctx context.Context, ndd ipld.Node, names []string) (nodes []ipld.Node, err error) { - ctx, span := internal.StartSpan(ctx, "basicResolver.ResolveLinks") - defer span.End() - - defer log.Debugw("resolvePathComponents", "names", names, "error", err) - // create a selector to traverse and match all path segments - pathSelector := pathAllSelector(names) - - session := r.FetcherFactory.NewSession(ctx) - - // traverse selector - nodes = []ipld.Node{ndd} - err = session.NodeMatching(ctx, ndd, pathSelector, func(res fetcher.FetchResult) error { - nodes = append(nodes, res.Node) - return nil - }) - if err != nil { - return nil, err - } - - return nodes, err -} - // Finds nodes matching the selector starting with a cid. Returns the matched nodes, the cid of the block containing // the last node, and the depth of the last node within its block (root is depth 0). func (r *basicResolver) resolveNodes(ctx context.Context, c cid.Cid, sel ipld.Node) ([]ipld.Node, cid.Cid, int, error) { - ctx, span := internal.StartSpan(ctx, "basicResolver.resolveNodes", trace.WithAttributes(attribute.Stringer("CID", c))) + ctx, span := startSpan(ctx, "basicResolver.resolveNodes", trace.WithAttributes(attribute.Stringer("CID", c))) defer span.End() session := r.FetcherFactory.NewSession(ctx) @@ -308,3 +242,7 @@ func pathSelector(path []string, ssb builder.SelectorSpecBuilder, reduce func(st } return spec.Node() } + +func startSpan(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + return otel.Tracer("boxo/path/resolver").Start(ctx, fmt.Sprintf("Path.%s", name), opts...) +} diff --git a/path/resolver/resolver_test.go b/path/resolver/resolver_test.go index c20f9306d..91f05e7d6 100644 --- a/path/resolver/resolver_test.go +++ b/path/resolver/resolver_test.go @@ -3,7 +3,6 @@ package resolver_test import ( "bytes" "context" - "fmt" "math/rand" "strings" "testing" @@ -21,12 +20,11 @@ import ( merkledag "github.com/ipfs/boxo/ipld/merkledag" dagmock "github.com/ipfs/boxo/ipld/merkledag/test" - path "github.com/ipfs/boxo/path" + "github.com/ipfs/boxo/path" "github.com/ipfs/boxo/path/resolver" "github.com/ipfs/go-unixfsnode" dagcbor "github.com/ipld/go-ipld-prime/codec/dagcbor" dagjson "github.com/ipld/go-ipld-prime/codec/dagjson" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -38,7 +36,7 @@ func randNode() *merkledag.ProtoNode { return node } -func TestRecurivePathResolution(t *testing.T) { +func TestRecursivePathResolution(t *testing.T) { ctx := context.Background() bsrv := dagmock.Bserv() @@ -47,29 +45,21 @@ func TestRecurivePathResolution(t *testing.T) { c := randNode() err := b.AddNodeLink("grandchild", c) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) err = a.AddNodeLink("child", b) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) for _, n := range []*merkledag.ProtoNode{a, b, c} { err = bsrv.AddBlock(ctx, n) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) } - aKey := a.Cid() + p, err := path.Join(path.FromCid(a.Cid()), "child", "grandchild") + require.NoError(t, err) - segments := []string{aKey.String(), "child", "grandchild"} - p, err := path.FromSegments("/ipfs/", segments...) 
- if err != nil { - t.Fatal(err) - } + imPath, err := path.NewImmutablePath(p) + require.NoError(t, err) fetcherFactory := bsfetcher.NewFetcherConfig(bsrv) fetcherFactory.NodeReifier = unixfsnode.Reify @@ -81,56 +71,26 @@ func TestRecurivePathResolution(t *testing.T) { }) resolver := resolver.NewBasicResolver(fetcherFactory) - node, lnk, err := resolver.ResolvePath(ctx, p) - if err != nil { - t.Fatal(err) - } + node, lnk, err := resolver.ResolvePath(ctx, imPath) + require.NoError(t, err) uNode, ok := node.(unixfsnode.PathedPBNode) require.True(t, ok) fd := uNode.FieldData() byts, err := fd.Must().AsBytes() require.NoError(t, err) + require.Equal(t, cidlink.Link{Cid: c.Cid()}, lnk) + require.Equal(t, c.Data(), byts) - assert.Equal(t, cidlink.Link{Cid: c.Cid()}, lnk) - - assert.Equal(t, c.Data(), byts) - cKey := c.Cid() - - rCid, rest, err := resolver.ResolveToLastNode(ctx, p) - if err != nil { - t.Fatal(err) - } - - if len(rest) != 0 { - t.Error("expected rest to be empty") - } - - if rCid.String() != cKey.String() { - t.Fatal(fmt.Errorf( - "ResolveToLastNode failed for %s: %s != %s", - p.String(), rCid.String(), cKey.String())) - } - - p2, err := path.FromSegments("/ipfs/", aKey.String()) - if err != nil { - t.Fatal(err) - } - - rCid, rest, err = resolver.ResolveToLastNode(ctx, p2) - if err != nil { - t.Fatal(err) - } - - if len(rest) != 0 { - t.Error("expected rest to be empty") - } + rCid, remainder, err := resolver.ResolveToLastNode(ctx, imPath) + require.NoError(t, err) + require.Empty(t, remainder) + require.Equal(t, c.Cid().String(), rCid.String()) - if rCid.String() != aKey.String() { - t.Fatal(fmt.Errorf( - "ResolveToLastNode failed for %s: %s != %s", - p.String(), rCid.String(), cKey.String())) - } + rCid, remainder, err = resolver.ResolveToLastNode(ctx, path.FromCid(a.Cid())) + require.NoError(t, err) + require.Empty(t, remainder) + require.Equal(t, a.Cid().String(), rCid.String()) } func TestResolveToLastNode_ErrNoLink(t *testing.T) { @@ -142,24 +102,16 @@ func TestResolveToLastNode_ErrNoLink(t *testing.T) { c := randNode() err := b.AddNodeLink("grandchild", c) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) err = a.AddNodeLink("child", b) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) for _, n := range []*merkledag.ProtoNode{a, b, c} { err = bsrv.AddBlock(ctx, n) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) } - aKey := a.Cid() - fetcherFactory := bsfetcher.NewFetcherConfig(bsrv) fetcherFactory.PrototypeChooser = dagpb.AddSupportToChooser(func(lnk ipld.Link, lnkCtx ipld.LinkContext) (ipld.NodePrototype, error) { if tlnkNd, ok := lnkCtx.LinkNode.(schema.TypedLinkNode); ok { @@ -171,21 +123,27 @@ func TestResolveToLastNode_ErrNoLink(t *testing.T) { r := resolver.NewBasicResolver(fetcherFactory) // test missing link intermediate segment - segments := []string{aKey.String(), "cheese", "time"} - p, err := path.FromSegments("/ipfs/", segments...) 
+ p, err := path.Join(path.FromCid(a.Cid()), "cheese", "time") require.NoError(t, err) - _, _, err = r.ResolveToLastNode(ctx, p) - require.EqualError(t, err, resolver.ErrNoLink{Name: "cheese", Node: aKey}.Error()) + imPath, err := path.NewImmutablePath(p) + require.NoError(t, err) + + _, _, err = r.ResolveToLastNode(ctx, imPath) + require.ErrorIs(t, err, &resolver.ErrNoLink{}) + require.Equal(t, "cheese", err.(*resolver.ErrNoLink).Name) + require.Equal(t, a.Cid(), err.(*resolver.ErrNoLink).Node) // test missing link at end - bKey := b.Cid() - segments = []string{aKey.String(), "child", "apples"} - p, err = path.FromSegments("/ipfs/", segments...) + p, err = path.Join(path.FromCid(a.Cid()), "child", "apples") + require.NoError(t, err) + + imPath, err = path.NewImmutablePath(p) require.NoError(t, err) - _, _, err = r.ResolveToLastNode(ctx, p) - require.EqualError(t, err, resolver.ErrNoLink{Name: "apples", Node: bKey}.Error()) + _, _, err = r.ResolveToLastNode(ctx, imPath) + require.Equal(t, "apples", err.(*resolver.ErrNoLink).Name) + require.Equal(t, b.Cid(), err.(*resolver.ErrNoLink).Node) } func TestResolveToLastNode_NoUnnecessaryFetching(t *testing.T) { @@ -201,10 +159,10 @@ func TestResolveToLastNode_NoUnnecessaryFetching(t *testing.T) { err = bsrv.AddBlock(ctx, a) require.NoError(t, err) - aKey := a.Cid() + p, err := path.Join(path.FromCid(a.Cid()), "child") + require.NoError(t, err) - segments := []string{aKey.String(), "child"} - p, err := path.FromSegments("/ipfs/", segments...) + imPath, err := path.NewImmutablePath(p) require.NoError(t, err) fetcherFactory := bsfetcher.NewFetcherConfig(bsrv) @@ -217,7 +175,7 @@ func TestResolveToLastNode_NoUnnecessaryFetching(t *testing.T) { fetcherFactory.NodeReifier = unixfsnode.Reify resolver := resolver.NewBasicResolver(fetcherFactory) - resolvedCID, remainingPath, err := resolver.ResolveToLastNode(ctx, p) + resolvedCID, remainingPath, err := resolver.ResolveToLastNode(ctx, imPath) require.NoError(t, err) require.Equal(t, len(remainingPath), 0, "cannot have remaining path") @@ -232,9 +190,11 @@ func TestPathRemainder(t *testing.T) { nb := basicnode.Prototype.Any.NewBuilder() err := dagjson.Decode(nb, strings.NewReader(`{"foo": {"bar": "baz"}}`)) require.NoError(t, err) + out := new(bytes.Buffer) err = dagcbor.Encode(nb.Build(), out) require.NoError(t, err) + lnk, err := cid.Prefix{ Version: 1, Codec: cid.DagCBOR, @@ -242,37 +202,46 @@ func TestPathRemainder(t *testing.T) { MhLength: 32, }.Sum(out.Bytes()) require.NoError(t, err) + blk, err := blocks.NewBlockWithCid(out.Bytes(), lnk) require.NoError(t, err) + bsrv.AddBlock(ctx, blk) fetcherFactory := bsfetcher.NewFetcherConfig(bsrv) resolver := resolver.NewBasicResolver(fetcherFactory) - rp1, remainder, err := resolver.ResolveToLastNode(ctx, path.FromString(lnk.String()+"/foo/bar")) + p, err := path.Join(path.FromCid(lnk), "foo", "bar") require.NoError(t, err) - assert.Equal(t, lnk, rp1) - require.Equal(t, "foo/bar", path.Join(remainder)) + imPath, err := path.NewImmutablePath(p) + require.NoError(t, err) + + rp, remainder, err := resolver.ResolveToLastNode(ctx, imPath) + require.NoError(t, err) + + require.Equal(t, lnk, rp) + require.Equal(t, "foo/bar", strings.Join(remainder, "/")) } func TestResolveToLastNode_MixedSegmentTypes(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + bsrv := dagmock.Bserv() a := randNode() err := bsrv.AddBlock(ctx, a) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) nb := basicnode.Prototype.Any.NewBuilder() json 
:= `{"foo":{"bar":[0,{"boom":["baz",1,2,{"/":"CID"},"blop"]}]}}` json = strings.ReplaceAll(json, "CID", a.Cid().String()) err = dagjson.Decode(nb, strings.NewReader(json)) require.NoError(t, err) + out := new(bytes.Buffer) err = dagcbor.Encode(nb.Build(), out) require.NoError(t, err) + lnk, err := cid.Prefix{ Version: 1, Codec: cid.DagCBOR, @@ -280,15 +249,22 @@ func TestResolveToLastNode_MixedSegmentTypes(t *testing.T) { MhLength: 32, }.Sum(out.Bytes()) require.NoError(t, err) + blk, err := blocks.NewBlockWithCid(out.Bytes(), lnk) require.NoError(t, err) + bsrv.AddBlock(ctx, blk) fetcherFactory := bsfetcher.NewFetcherConfig(bsrv) resolver := resolver.NewBasicResolver(fetcherFactory) - cid, remainder, err := resolver.ResolveToLastNode(ctx, path.FromString(lnk.String()+"/foo/bar/1/boom/3")) + newPath, err := path.Join(path.FromCid(lnk), "foo", "bar", "1", "boom", "3") require.NoError(t, err) - assert.Equal(t, 0, len(remainder)) - assert.True(t, cid.Equals(a.Cid())) + imPath, err := path.NewImmutablePath(newPath) + require.NoError(t, err) + + cid, remainder, err := resolver.ResolveToLastNode(ctx, imPath) + require.NoError(t, err) + require.Equal(t, 0, len(remainder)) + require.True(t, cid.Equals(a.Cid())) } diff --git a/peering/peering.go b/peering/peering.go new file mode 100644 index 000000000..225bcff76 --- /dev/null +++ b/peering/peering.go @@ -0,0 +1,321 @@ +package peering + +import ( + "context" + "errors" + "math/rand" + "strconv" + "sync" + "time" + + "github.com/ipfs/go-log/v2" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/multiformats/go-multiaddr" +) + +const ( + // maxBackoff is the maximum time between reconnect attempts. + maxBackoff = 10 * time.Minute + // The backoff will be cut off when we get within 10% of the actual max. + // If we go over the max, we'll adjust the delay down to a random value + // between 90-100% of the max backoff. + maxBackoffJitter = 10 // % + connmgrTag = "ipfs-peering" + // This needs to be sufficient to prevent two sides from simultaneously + // dialing. + initialDelay = 5 * time.Second +) + +var logger = log.Logger("peering") + +type State uint + +func (s State) String() string { + switch s { + case StateInit: + return "init" + case StateRunning: + return "running" + case StateStopped: + return "stopped" + default: + return "unknown peering state: " + strconv.FormatUint(uint64(s), 10) + } +} + +const ( + StateInit State = iota + StateRunning + StateStopped +) + +// peerHandler keeps track of all state related to a specific "peering" peer. +type peerHandler struct { + peer peer.ID + host host.Host + ctx context.Context + cancel context.CancelFunc + + mu sync.Mutex + addrs []multiaddr.Multiaddr + reconnectTimer *time.Timer + + nextDelay time.Duration +} + +// setAddrs sets the addresses for this peer. +func (ph *peerHandler) setAddrs(addrs []multiaddr.Multiaddr) { + // Not strictly necessary, but it helps to not trust the calling code. + addrCopy := make([]multiaddr.Multiaddr, len(addrs)) + copy(addrCopy, addrs) + + ph.mu.Lock() + defer ph.mu.Unlock() + ph.addrs = addrCopy +} + +// getAddrs returns a shared slice of addresses for this peer. Do not modify. +func (ph *peerHandler) getAddrs() []multiaddr.Multiaddr { + ph.mu.Lock() + defer ph.mu.Unlock() + return ph.addrs +} + +// stop permanently stops the peer handler. 
+func (ph *peerHandler) stop() { + ph.cancel() + + ph.mu.Lock() + defer ph.mu.Unlock() + if ph.reconnectTimer != nil { + ph.reconnectTimer.Stop() + ph.reconnectTimer = nil + } +} + +func (ph *peerHandler) nextBackoff() time.Duration { + if ph.nextDelay < maxBackoff { + ph.nextDelay += ph.nextDelay/2 + time.Duration(rand.Int63n(int64(ph.nextDelay))) + } + + // If we've gone over the max backoff, reduce it under the max. + if ph.nextDelay > maxBackoff { + ph.nextDelay = maxBackoff + // randomize the backoff a bit (10%). + ph.nextDelay -= time.Duration(rand.Int63n(int64(maxBackoff) * maxBackoffJitter / 100)) + } + + return ph.nextDelay +} + +func (ph *peerHandler) reconnect() { + // Try connecting + addrs := ph.getAddrs() + logger.Debugw("reconnecting", "peer", ph.peer, "addrs", addrs) + + err := ph.host.Connect(ph.ctx, peer.AddrInfo{ID: ph.peer, Addrs: addrs}) + if err != nil { + logger.Debugw("failed to reconnect", "peer", ph.peer, "error", err) + // Ok, we failed. Extend the timeout. + ph.mu.Lock() + if ph.reconnectTimer != nil { + // Only counts if the reconnectTimer still exists. If not, a + // connection _was_ somehow established. + ph.reconnectTimer.Reset(ph.nextBackoff()) + } + // Otherwise, someone else has stopped us so we can assume that + // we're either connected or someone else will start us. + ph.mu.Unlock() + } + + // Always call this. We could have connected since we processed the + // error. + ph.stopIfConnected() +} + +func (ph *peerHandler) stopIfConnected() { + ph.mu.Lock() + defer ph.mu.Unlock() + + if ph.reconnectTimer != nil && ph.host.Network().Connectedness(ph.peer) == network.Connected { + logger.Debugw("successfully reconnected", "peer", ph.peer) + ph.reconnectTimer.Stop() + ph.reconnectTimer = nil + ph.nextDelay = initialDelay + } +} + +// startIfDisconnected is the inverse of stopIfConnected. +func (ph *peerHandler) startIfDisconnected() { + ph.mu.Lock() + defer ph.mu.Unlock() + + if ph.reconnectTimer == nil && ph.host.Network().Connectedness(ph.peer) != network.Connected { + logger.Debugw("disconnected from peer", "peer", ph.peer) + // Always start with a short timeout so we can stagger things a bit. + ph.reconnectTimer = time.AfterFunc(ph.nextBackoff(), ph.reconnect) + } +} + +// PeeringService maintains connections to specified peers, reconnecting on +// disconnect with a back-off. +type PeeringService struct { + host host.Host + + mu sync.RWMutex + peers map[peer.ID]*peerHandler + state State +} + +// NewPeeringService constructs a new peering service. Peers can be added and +// removed immediately, but connections won't be formed until `Start` is called. +func NewPeeringService(host host.Host) *PeeringService { + return &PeeringService{host: host, peers: make(map[peer.ID]*peerHandler)} +} + +// Start starts the peering service, connecting and maintaining connections to +// all registered peers. It returns an error if the service has already been +// stopped. +func (ps *PeeringService) Start() error { + ps.mu.Lock() + defer ps.mu.Unlock() + + switch ps.state { + case StateInit: + logger.Infow("starting") + case StateRunning: + return nil + case StateStopped: + return errors.New("already stopped") + } + ps.host.Network().Notify((*netNotifee)(ps)) + ps.state = StateRunning + for _, handler := range ps.peers { + go handler.startIfDisconnected() + } + return nil +} + +// GetState get the State of the PeeringService. +func (ps *PeeringService) GetState() State { + ps.mu.RLock() + defer ps.mu.RUnlock() + return ps.state +} + +// Stop stops the peering service. 
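// Typical lifecycle, shown as an editorial sketch (not part of this patch); h,
// pid, and addrs are assumed to be an existing libp2p host, peer ID, and
// addresses:
//
//	ps := peering.NewPeeringService(h)
//	ps.AddPeer(peer.AddrInfo{ID: pid, Addrs: addrs})
//	if err := ps.Start(); err != nil {
//		// handle error
//	}
//	defer ps.Stop()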
+func (ps *PeeringService) Stop() { + ps.host.Network().StopNotify((*netNotifee)(ps)) + ps.mu.Lock() + defer ps.mu.Unlock() + + switch ps.state { + case StateInit, StateRunning: + logger.Infow("stopping") + for _, handler := range ps.peers { + handler.stop() + } + ps.state = StateStopped + } +} + +// AddPeer adds a peer to the peering service. This function may be safely +// called at any time: before the service is started, while running, or after it +// stops. +// +// Add peer may also be called multiple times for the same peer. The new +// addresses will replace the old. +func (ps *PeeringService) AddPeer(info peer.AddrInfo) { + ps.mu.Lock() + defer ps.mu.Unlock() + + if handler, ok := ps.peers[info.ID]; ok { + logger.Infow("updating addresses", "peer", info.ID, "addrs", info.Addrs) + handler.setAddrs(info.Addrs) + } else { + logger.Infow("peer added", "peer", info.ID, "addrs", info.Addrs) + ps.host.ConnManager().Protect(info.ID, connmgrTag) + + handler = &peerHandler{ + host: ps.host, + peer: info.ID, + addrs: info.Addrs, + nextDelay: initialDelay, + } + handler.ctx, handler.cancel = context.WithCancel(context.Background()) + ps.peers[info.ID] = handler + switch ps.state { + case StateRunning: + go handler.startIfDisconnected() + case StateStopped: + // We still construct everything in this state because + // it's easier to reason about. But we should still free + // resources. + handler.cancel() + } + } +} + +// ListPeers lists peers in the peering service. +func (ps *PeeringService) ListPeers() []peer.AddrInfo { + ps.mu.RLock() + defer ps.mu.RUnlock() + + out := make([]peer.AddrInfo, 0, len(ps.peers)) + for id, addrs := range ps.peers { + ai := peer.AddrInfo{ID: id} + ai.Addrs = append(ai.Addrs, addrs.addrs...) + out = append(out, ai) + } + return out +} + +// RemovePeer removes a peer from the peering service. This function may be +// safely called at any time: before the service is started, while running, or +// after it stops. +func (ps *PeeringService) RemovePeer(id peer.ID) { + ps.mu.Lock() + defer ps.mu.Unlock() + + if handler, ok := ps.peers[id]; ok { + logger.Infow("peer removed", "peer", id) + ps.host.ConnManager().Unprotect(id, connmgrTag) + + handler.stop() + delete(ps.peers, id) + } +} + +type netNotifee PeeringService + +func (nn *netNotifee) Connected(_ network.Network, c network.Conn) { + ps := (*PeeringService)(nn) + + p := c.RemotePeer() + ps.mu.RLock() + defer ps.mu.RUnlock() + + if handler, ok := ps.peers[p]; ok { + // use a goroutine to avoid blocking events. + go handler.stopIfConnected() + } +} + +func (nn *netNotifee) Disconnected(_ network.Network, c network.Conn) { + ps := (*PeeringService)(nn) + + p := c.RemotePeer() + ps.mu.RLock() + defer ps.mu.RUnlock() + + if handler, ok := ps.peers[p]; ok { + // use a goroutine to avoid blocking events. 
+ go handler.startIfDisconnected() + } +} +func (nn *netNotifee) OpenedStream(network.Network, network.Stream) {} +func (nn *netNotifee) ClosedStream(network.Network, network.Stream) {} +func (nn *netNotifee) Listen(network.Network, multiaddr.Multiaddr) {} +func (nn *netNotifee) ListenClose(network.Network, multiaddr.Multiaddr) {} diff --git a/peering/peering_test.go b/peering/peering_test.go new file mode 100644 index 000000000..3d146e3e3 --- /dev/null +++ b/peering/peering_test.go @@ -0,0 +1,172 @@ +package peering + +import ( + "context" + "testing" + "time" + + "github.com/libp2p/go-libp2p" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/p2p/net/connmgr" + + "github.com/stretchr/testify/require" +) + +func newNode(t *testing.T) host.Host { + cm, err := connmgr.NewConnManager(1, 100, connmgr.WithGracePeriod(0)) + require.NoError(t, err) + h, err := libp2p.New( + libp2p.ListenAddrStrings("/ip4/127.0.0.1/tcp/0"), + // We'd like to set the connection manager low water to 0, but + // that would disable the connection manager. + libp2p.ConnectionManager(cm), + ) + require.NoError(t, err) + return h +} + +func TestPeeringService(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + h1 := newNode(t) + ps1 := NewPeeringService(h1) + + h2 := newNode(t) + h3 := newNode(t) + h4 := newNode(t) + + // peer 1 -> 2 + ps1.AddPeer(peer.AddrInfo{ID: h2.ID(), Addrs: h2.Addrs()}) + require.Contains(t, ps1.ListPeers(), peer.AddrInfo{ID: h2.ID(), Addrs: h2.Addrs()}) + + // We haven't started so we shouldn't have any peers. + require.Never(t, func() bool { + return len(h1.Network().Peers()) > 0 + }, 100*time.Millisecond, 1*time.Second, "expected host 1 to have no peers") + + // Use p4 to take up the one slot we have in the connection manager. + for _, h := range []host.Host{h1, h2} { + require.NoError(t, h.Connect(ctx, peer.AddrInfo{ID: h4.ID(), Addrs: h4.Addrs()})) + h.ConnManager().TagPeer(h4.ID(), "sticky-peer", 1000) + } + + // Now start. + require.NoError(t, ps1.Start()) + // starting twice is fine. + require.NoError(t, ps1.Start()) + + // We should eventually connect. + t.Logf("waiting for h1 to connect to h2") + require.Eventually(t, func() bool { + return h1.Network().Connectedness(h2.ID()) == network.Connected + }, 30*time.Second, 10*time.Millisecond) + + // Now explicitly connect to h3. + t.Logf("waiting for h1's connection to h3 to work") + require.NoError(t, h1.Connect(ctx, peer.AddrInfo{ID: h3.ID(), Addrs: h3.Addrs()})) + require.Eventually(t, func() bool { + return h1.Network().Connectedness(h3.ID()) == network.Connected + }, 30*time.Second, 100*time.Millisecond) + + require.Len(t, h1.Network().Peers(), 3) + + // force a disconnect + h1.ConnManager().TrimOpenConns(ctx) + + // Should disconnect from h3. + t.Logf("waiting for h1's connection to h3 to disconnect") + require.Eventually(t, func() bool { + return h1.Network().Connectedness(h3.ID()) != network.Connected + }, 5*time.Second, 10*time.Millisecond) + + // Should remain connected to p2 + require.Never(t, func() bool { + return h1.Network().Connectedness(h2.ID()) != network.Connected + }, 5*time.Second, 1*time.Second) + + // Now force h2 to disconnect (we have an asymmetric peering). + conns := h2.Network().ConnsToPeer(h1.ID()) + require.NotEmpty(t, conns) + h2.ConnManager().TrimOpenConns(ctx) + + // All conns to peer should eventually close. 
+ t.Logf("waiting for all connections to close") + for _, c := range conns { + require.Eventually(t, func() bool { + s, err := c.NewStream(context.Background()) + if s != nil { + _ = s.Reset() + } + return err != nil + }, 5*time.Second, 10*time.Millisecond) + } + + // Should eventually re-connect. + require.Eventually(t, func() bool { + return h1.Network().Connectedness(h2.ID()) == network.Connected + }, 30*time.Second, 1*time.Second) + + // Unprotect 2 from 1. + ps1.RemovePeer(h2.ID()) + require.NotContains(t, ps1.ListPeers(), peer.AddrInfo{ID: h2.ID(), Addrs: h2.Addrs()}) + + // Trim connections. + h1.ConnManager().TrimOpenConns(ctx) + + // Should disconnect + t.Logf("waiting for h1 to disconnect from h2") + require.Eventually(t, func() bool { + return h1.Network().Connectedness(h2.ID()) != network.Connected + }, 5*time.Second, 10*time.Millisecond) + + // Should never reconnect. + t.Logf("ensuring h1 is not connected to h2 again") + require.Never(t, func() bool { + return h1.Network().Connectedness(h2.ID()) == network.Connected + }, 20*time.Second, 1*time.Second) + + // Until added back + ps1.AddPeer(peer.AddrInfo{ID: h2.ID(), Addrs: h2.Addrs()}) + require.Contains(t, ps1.ListPeers(), peer.AddrInfo{ID: h2.ID(), Addrs: h2.Addrs()}) + ps1.AddPeer(peer.AddrInfo{ID: h3.ID(), Addrs: h3.Addrs()}) + require.Contains(t, ps1.ListPeers(), peer.AddrInfo{ID: h3.ID(), Addrs: h3.Addrs()}) + t.Logf("wait for h1 to connect to h2 and h3 again") + require.Eventually(t, func() bool { + return h1.Network().Connectedness(h2.ID()) == network.Connected + }, 30*time.Second, 1*time.Second) + require.Eventually(t, func() bool { + return h1.Network().Connectedness(h3.ID()) == network.Connected + }, 30*time.Second, 1*time.Second) + + // Should be able to repeatedly stop. + ps1.Stop() + ps1.Stop() + + // Adding and removing should work after stopping. + ps1.AddPeer(peer.AddrInfo{ID: h4.ID(), Addrs: h4.Addrs()}) + require.Contains(t, ps1.ListPeers(), peer.AddrInfo{ID: h4.ID(), Addrs: h4.Addrs()}) + ps1.RemovePeer(h2.ID()) + require.NotContains(t, ps1.ListPeers(), peer.AddrInfo{ID: h2.ID(), Addrs: h2.Addrs()}) +} + +func TestNextBackoff(t *testing.T) { + minMaxBackoff := (100 - maxBackoffJitter) / 100 * maxBackoff + for x := 0; x < 1000; x++ { + ph := peerHandler{nextDelay: time.Second} + for min, max := time.Second*3/2, time.Second*5/2; min < minMaxBackoff; min, max = min*3/2, max*5/2 { + b := ph.nextBackoff() + if b > max || b < min { + t.Errorf("expected backoff %s to be between %s and %s", b, min, max) + } + } + for i := 0; i < 100; i++ { + b := ph.nextBackoff() + if b < minMaxBackoff || b > maxBackoff { + t.Fatal("failed to stay within max bounds") + } + } + } +} diff --git a/pinning/remote/client/openapi/README.md b/pinning/remote/client/openapi/README.md index fe21b9a00..eddae08b7 100644 --- a/pinning/remote/client/openapi/README.md +++ b/pinning/remote/client/openapi/README.md @@ -151,7 +151,7 @@ Note, enum values are always validated and all unused variables are silently ign ### URLs Configuration per Operation Each operation can use different server URL defined using `OperationServers` map in the `Configuration`. -An operation is uniquely identifield by `"{classname}Service.{nickname}"` string. +An operation is uniquely identified by `"{classname}Service.{nickname}"` string. Similar rules for overriding default operation server index and variables applies by using `sw.ContextOperationServerIndices` and `sw.ContextOperationServerVariables` context maps. 
``` diff --git a/routing/http/client/client.go b/routing/http/client/client.go index 4a0d29b33..cda9097fd 100644 --- a/routing/http/client/client.go +++ b/routing/http/client/client.go @@ -28,7 +28,7 @@ import ( ) var ( - _ contentrouter.Client = &client{} + _ contentrouter.Client = &Client{} logger = logging.Logger("routing/http/client") defaultHTTPClient = &http.Client{ Transport: &ResponseBodyLimitedTransport{ @@ -45,7 +45,7 @@ const ( mediaTypeIPNSRecord = "application/vnd.ipfs.ipns-record" ) -type client struct { +type Client struct { baseURL string httpClient httpClient clock clock.Clock @@ -65,28 +65,28 @@ type client struct { // version sent a request var defaultUserAgent = moduleVersion() -var _ contentrouter.Client = &client{} +var _ contentrouter.Client = &Client{} type httpClient interface { Do(req *http.Request) (*http.Response, error) } -type Option func(*client) +type Option func(*Client) func WithIdentity(identity crypto.PrivKey) Option { - return func(c *client) { + return func(c *Client) { c.identity = identity } } func WithHTTPClient(h httpClient) Option { - return func(c *client) { + return func(c *Client) { c.httpClient = h } } func WithUserAgent(ua string) Option { - return func(c *client) { + return func(c *Client) { if ua == "" { return } @@ -103,7 +103,7 @@ func WithUserAgent(ua string) Option { } func WithProviderInfo(peerID peer.ID, addrs []multiaddr.Multiaddr) Option { - return func(c *client) { + return func(c *Client) { c.peerID = peerID for _, a := range addrs { c.addrs = append(c.addrs, types.Multiaddr{Multiaddr: a}) @@ -112,15 +112,15 @@ func WithProviderInfo(peerID peer.ID, addrs []multiaddr.Multiaddr) Option { } func WithStreamResultsRequired() Option { - return func(c *client) { + return func(c *Client) { c.accepts = mediaTypeNDJSON } } // New creates a content routing API client. // The Provider and identity parameters are option. If they are nil, the [client.ProvideBitswap] method will not function. -func New(baseURL string, opts ...Option) (*client, error) { - client := &client{ +func New(baseURL string, opts ...Option) (*Client, error) { + client := &Client{ baseURL: baseURL, httpClient: defaultHTTPClient, clock: clock.New(), @@ -160,7 +160,9 @@ func (c *measuringIter[T]) Close() error { return c.Iter.Close() } -func (c *client) FindProviders(ctx context.Context, key cid.Cid) (providers iter.ResultIter[types.Record], err error) { +// FindProviders searches for providers that are able to provide the given [cid.Cid]. +// In a more generic way, it is also used as a mapping between CIDs and relevant metadata. 
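// Editorial sketch (not part of this patch): a minimal delegated-routing lookup.
// The base URL and someCid are placeholders, not real values.
//
//	c, _ := client.New("https://delegated-router.example.net")
//	recs, err := c.FindProviders(ctx, someCid)
//	// recs is an iter.ResultIter[types.Record]; call Next/Val to read results and Close when done.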
+func (c *Client) FindProviders(ctx context.Context, key cid.Cid) (providers iter.ResultIter[types.Record], err error) { // TODO test measurements m := newMeasurement("FindProviders") @@ -237,7 +239,7 @@ func (c *client) FindProviders(ctx context.Context, key cid.Cid) (providers iter // Deprecated: protocol-agnostic provide is being worked on in [IPIP-378]: // // [IPIP-378]: https://github.com/ipfs/specs/pull/378 -func (c *client) ProvideBitswap(ctx context.Context, keys []cid.Cid, ttl time.Duration) (time.Duration, error) { +func (c *Client) ProvideBitswap(ctx context.Context, keys []cid.Cid, ttl time.Duration) (time.Duration, error) { if c.identity == nil { return 0, errors.New("cannot provide Bitswap records without an identity") } @@ -283,7 +285,7 @@ func (c *client) ProvideBitswap(ctx context.Context, keys []cid.Cid, ttl time.Du // ProvideAsync makes a provide request to a delegated router // //lint:ignore SA1019 // ignore staticcheck -func (c *client) provideSignedBitswapRecord(ctx context.Context, bswp *types.WriteBitswapRecord) (time.Duration, error) { +func (c *Client) provideSignedBitswapRecord(ctx context.Context, bswp *types.WriteBitswapRecord) (time.Duration, error) { //lint:ignore SA1019 // ignore staticcheck req := jsontypes.WriteProvidersRequest{Providers: []types.Record{bswp}} @@ -332,7 +334,8 @@ func (c *client) provideSignedBitswapRecord(ctx context.Context, bswp *types.Wri return 0, nil } -func (c *client) FindPeers(ctx context.Context, pid peer.ID) (peers iter.ResultIter[types.Record], err error) { +// FindPeers searches for information for the given [peer.ID]. +func (c *Client) FindPeers(ctx context.Context, pid peer.ID) (peers iter.ResultIter[*types.PeerRecord], err error) { m := newMeasurement("FindPeers") url := c.baseURL + "/routing/v1/peers/" + peer.ToCid(pid).String() @@ -359,7 +362,7 @@ func (c *client) FindPeers(ctx context.Context, pid peer.ID) (peers iter.ResultI if resp.StatusCode == http.StatusNotFound { resp.Body.Close() m.record(ctx) - return iter.FromSlice[iter.Result[types.Record]](nil), nil + return iter.FromSlice[iter.Result[*types.PeerRecord]](nil), nil } if resp.StatusCode != http.StatusOK { @@ -387,25 +390,28 @@ func (c *client) FindPeers(ctx context.Context, pid peer.ID) (peers iter.ResultI } }() - var it iter.ResultIter[types.Record] + var it iter.ResultIter[*types.PeerRecord] switch mediaType { case mediaTypeJSON: parsedResp := &jsontypes.PeersResponse{} err = json.NewDecoder(resp.Body).Decode(parsedResp) - var sliceIt iter.Iter[types.Record] = iter.FromSlice(parsedResp.Peers) + var sliceIt iter.Iter[*types.PeerRecord] = iter.FromSlice(parsedResp.Peers) it = iter.ToResultIter(sliceIt) case mediaTypeNDJSON: skipBodyClose = true - it = ndjson.NewRecordsIter(resp.Body) + it = ndjson.NewPeerRecordsIter(resp.Body) default: logger.Errorw("unknown media type", "MediaType", mediaType, "ContentType", respContentType) return nil, errors.New("unknown content type") } - return &measuringIter[iter.Result[types.Record]]{Iter: it, ctx: ctx, m: m}, nil + return &measuringIter[iter.Result[*types.PeerRecord]]{Iter: it, ctx: ctx, m: m}, nil } -func (c *client) GetIPNS(ctx context.Context, name ipns.Name) (*ipns.Record, error) { +// GetIPNS tries to retrieve the [ipns.Record] for the given [ipns.Name]. The record is +// validated against the given name. If validation fails, an error is returned, but no +// record. 
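// Editorial sketch (not part of this patch); ipns.NameFromPeer is assumed to be
// available in the boxo/ipns package used above:
//
//	rec, err := c.GetIPNS(ctx, ipns.NameFromPeer(pid))
//	// rec is only returned when the record validates against the name.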
+func (c *Client) GetIPNS(ctx context.Context, name ipns.Name) (*ipns.Record, error) { url := c.baseURL + "/routing/v1/ipns/" + name.String() httpReq, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) @@ -443,7 +449,8 @@ func (c *client) GetIPNS(ctx context.Context, name ipns.Name) (*ipns.Record, err return record, nil } -func (c *client) PutIPNS(ctx context.Context, name ipns.Name, record *ipns.Record) error { +// PutIPNS attempts at putting the given [ipns.Record] for the given [ipns.Name]. +func (c *Client) PutIPNS(ctx context.Context, name ipns.Name, record *ipns.Record) error { url := c.baseURL + "/routing/v1/ipns/" + name.String() rawRecord, err := ipns.MarshalRecord(record) diff --git a/routing/http/client/client_test.go b/routing/http/client/client_test.go index 95683bc3f..7edd77c10 100644 --- a/routing/http/client/client_test.go +++ b/routing/http/client/client_test.go @@ -12,9 +12,8 @@ import ( "time" "github.com/benbjohnson/clock" - "github.com/ipfs/boxo/coreiface/path" ipns "github.com/ipfs/boxo/ipns" - ipfspath "github.com/ipfs/boxo/path" + "github.com/ipfs/boxo/path" "github.com/ipfs/boxo/routing/http/server" "github.com/ipfs/boxo/routing/http/types" "github.com/ipfs/boxo/routing/http/types/iter" @@ -43,9 +42,9 @@ func (m *mockContentRouter) ProvideBitswap(ctx context.Context, req *server.Bits return args.Get(0).(time.Duration), args.Error(1) } -func (m *mockContentRouter) FindPeers(ctx context.Context, pid peer.ID, limit int) (iter.ResultIter[types.Record], error) { +func (m *mockContentRouter) FindPeers(ctx context.Context, pid peer.ID, limit int) (iter.ResultIter[*types.PeerRecord], error) { args := m.Called(ctx, pid, limit) - return args.Get(0).(iter.ResultIter[types.Record]), args.Error(1) + return args.Get(0).(iter.ResultIter[*types.PeerRecord]), args.Error(1) } func (m *mockContentRouter) GetIPNS(ctx context.Context, name ipns.Name) (*ipns.Record, error) { @@ -67,7 +66,7 @@ type testDeps struct { server *httptest.Server peerID peer.ID addrs []multiaddr.Multiaddr - client *client + client *Client } type recordingHandler struct { @@ -487,7 +486,7 @@ func TestClient_Provide(t *testing.T) { func TestClient_FindPeers(t *testing.T) { peerRecord := makePeerRecord() - peerRecords := []iter.Result[types.Record]{ + peerRecords := []iter.Result[*types.PeerRecord]{ {Val: &peerRecord}, } pid := *peerRecord.ID @@ -496,13 +495,13 @@ func TestClient_FindPeers(t *testing.T) { name string httpStatusCode int stopServer bool - routerResult []iter.Result[types.Record] + routerResult []iter.Result[*types.PeerRecord] routerErr error clientRequiresStreaming bool serverStreamingDisabled bool expErrContains osErrContains - expResult []iter.Result[types.Record] + expResult []iter.Result[*types.PeerRecord] expStreamingResponse bool expJSONResponse bool }{ @@ -607,7 +606,7 @@ func TestClient_FindPeers(t *testing.T) { resultIter, err := client.FindPeers(ctx, pid) c.expErrContains.errContains(t, err) - results := iter.ReadAll[iter.Result[types.Record]](resultIter) + results := iter.ReadAll[iter.Result[*types.PeerRecord]](resultIter) assert.Equal(t, c.expResult, results) }) } @@ -627,11 +626,11 @@ func makeIPNSRecord(t *testing.T, sk crypto.PrivKey, opts ...ipns.Option) (*ipns cid, err := cid.Decode("bafkreifjjcie6lypi6ny7amxnfftagclbuxndqonfipmb64f2km2devei4") require.NoError(t, err) - path := path.IpfsPath(cid) + path := path.FromCid(cid) eol := time.Now().Add(time.Hour * 48) ttl := time.Second * 20 - record, err := ipns.NewRecord(sk, ipfspath.FromString(path.String()), 1, eol, ttl, 
opts...) + record, err := ipns.NewRecord(sk, path, 1, eol, ttl, opts...) require.NoError(t, err) rawRecord, err := ipns.MarshalRecord(record) diff --git a/routing/http/contentrouter/contentrouter.go b/routing/http/contentrouter/contentrouter.go index 2438d4fea..9115ef154 100644 --- a/routing/http/contentrouter/contentrouter.go +++ b/routing/http/contentrouter/contentrouter.go @@ -26,7 +26,7 @@ const ttl = 24 * time.Hour type Client interface { FindProviders(ctx context.Context, key cid.Cid) (iter.ResultIter[types.Record], error) ProvideBitswap(ctx context.Context, keys []cid.Cid, ttl time.Duration) (time.Duration, error) - FindPeers(ctx context.Context, pid peer.ID) (peers iter.ResultIter[types.Record], err error) + FindPeers(ctx context.Context, pid peer.ID) (peers iter.ResultIter[*types.PeerRecord], err error) GetIPNS(ctx context.Context, name ipns.Name) (*ipns.Record, error) PutIPNS(ctx context.Context, name ipns.Name, record *ipns.Record) error } @@ -196,28 +196,15 @@ func (c *contentRouter) FindPeer(ctx context.Context, pid peer.ID) (peer.AddrInf logger.Warnw("error iterating provider responses: %s", res.Err) continue } - v := res.Val - if v.GetSchema() == types.SchemaPeer { - result, ok := v.(*types.PeerRecord) - if !ok { - logger.Errorw( - "problem casting find providers result", - "Schema", v.GetSchema(), - "Type", reflect.TypeOf(v).String(), - ) - continue - } - - var addrs []multiaddr.Multiaddr - for _, a := range result.Addrs { - addrs = append(addrs, a.Multiaddr) - } - - return peer.AddrInfo{ - ID: *result.ID, - Addrs: addrs, - }, nil + var addrs []multiaddr.Multiaddr + for _, a := range res.Val.Addrs { + addrs = append(addrs, a.Multiaddr) } + + return peer.AddrInfo{ + ID: *res.Val.ID, + Addrs: addrs, + }, nil } return peer.AddrInfo{}, err diff --git a/routing/http/contentrouter/contentrouter_test.go b/routing/http/contentrouter/contentrouter_test.go index 83a086997..1c47850b9 100644 --- a/routing/http/contentrouter/contentrouter_test.go +++ b/routing/http/contentrouter/contentrouter_test.go @@ -6,9 +6,8 @@ import ( "testing" "time" - "github.com/ipfs/boxo/coreiface/path" "github.com/ipfs/boxo/ipns" - ipfspath "github.com/ipfs/boxo/path" + "github.com/ipfs/boxo/path" "github.com/ipfs/boxo/routing/http/types" "github.com/ipfs/boxo/routing/http/types/iter" "github.com/ipfs/go-cid" @@ -33,9 +32,9 @@ func (m *mockClient) FindProviders(ctx context.Context, key cid.Cid) (iter.Resul return args.Get(0).(iter.ResultIter[types.Record]), args.Error(1) } -func (m *mockClient) FindPeers(ctx context.Context, pid peer.ID) (iter.ResultIter[types.Record], error) { +func (m *mockClient) FindPeers(ctx context.Context, pid peer.ID) (iter.ResultIter[*types.PeerRecord], error) { args := m.Called(ctx, pid) - return args.Get(0).(iter.ResultIter[types.Record]), args.Error(1) + return args.Get(0).(iter.ResultIter[*types.PeerRecord]), args.Error(1) } func (m *mockClient) Ready(ctx context.Context) (bool, error) { @@ -184,17 +183,14 @@ func TestFindPeer(t *testing.T) { crc := NewContentRoutingClient(client) p1 := peer.ID("peer1") - ais := []types.Record{ - &types.UnknownRecord{ - Schema: "unknown", - }, - &types.PeerRecord{ + ais := []*types.PeerRecord{ + { Schema: types.SchemaPeer, ID: &p1, Protocols: []string{"transport-bitswap"}, }, } - aisIter := iter.ToResultIter[types.Record](iter.FromSlice(ais)) + aisIter := iter.ToResultIter[*types.PeerRecord](iter.FromSlice(ais)) client.On("FindPeers", ctx, p1).Return(aisIter, nil) @@ -217,11 +213,11 @@ func makeIPNSRecord(t *testing.T, sk crypto.PrivKey, opts 
...ipns.Option) (*ipns cid, err := cid.Decode("bafkreifjjcie6lypi6ny7amxnfftagclbuxndqonfipmb64f2km2devei4") require.NoError(t, err) - path := path.IpfsPath(cid) + path := path.FromCid(cid) eol := time.Now().Add(time.Hour * 48) ttl := time.Second * 20 - record, err := ipns.NewRecord(sk, ipfspath.FromString(path.String()), 1, eol, ttl, opts...) + record, err := ipns.NewRecord(sk, path, 1, eol, ttl, opts...) require.NoError(t, err) rawRecord, err := ipns.MarshalRecord(record) diff --git a/routing/http/server/server.go b/routing/http/server/server.go index 9e7d81a04..d9be47eb2 100644 --- a/routing/http/server/server.go +++ b/routing/http/server/server.go @@ -63,7 +63,7 @@ type ContentRouter interface { // FindPeers searches for peers who have the provided [peer.ID]. // Limit indicates the maximum amount of results to return; 0 means unbounded. - FindPeers(ctx context.Context, pid peer.ID, limit int) (iter.ResultIter[types.Record], error) + FindPeers(ctx context.Context, pid peer.ID, limit int) (iter.ResultIter[*types.PeerRecord], error) // GetIPNS searches for an [ipns.Record] for the given [ipns.Name]. GetIPNS(ctx context.Context, name ipns.Name) (*ipns.Record, error) @@ -267,7 +267,7 @@ func (s *server) findPeers(w http.ResponseWriter, r *http.Request) { } var ( - handlerFunc func(w http.ResponseWriter, provIter iter.ResultIter[types.Record]) + handlerFunc func(w http.ResponseWriter, provIter iter.ResultIter[*types.PeerRecord]) recordsLimit int ) @@ -347,7 +347,7 @@ func (s *server) provide(w http.ResponseWriter, httpReq *http.Request) { writeJSONResult(w, "Provide", resp) } -func (s *server) findPeersJSON(w http.ResponseWriter, peersIter iter.ResultIter[types.Record]) { +func (s *server) findPeersJSON(w http.ResponseWriter, peersIter iter.ResultIter[*types.PeerRecord]) { defer peersIter.Close() peers, err := iter.ReadAllResults(peersIter) @@ -361,7 +361,7 @@ func (s *server) findPeersJSON(w http.ResponseWriter, peersIter iter.ResultIter[ }) } -func (s *server) findPeersNDJSON(w http.ResponseWriter, peersIter iter.ResultIter[types.Record]) { +func (s *server) findPeersNDJSON(w http.ResponseWriter, peersIter iter.ResultIter[*types.PeerRecord]) { writeResultsIterNDJSON(w, peersIter) } @@ -491,7 +491,7 @@ func logErr(method, msg string, err error) { logger.Infow(msg, "Method", method, "Error", err) } -func writeResultsIterNDJSON(w http.ResponseWriter, resultIter iter.ResultIter[types.Record]) { +func writeResultsIterNDJSON[T any](w http.ResponseWriter, resultIter iter.ResultIter[T]) { defer resultIter.Close() w.Header().Set("Content-Type", mediaTypeNDJSON) diff --git a/routing/http/server/server_test.go b/routing/http/server/server_test.go index f6d4a3dba..f823ac25a 100644 --- a/routing/http/server/server_test.go +++ b/routing/http/server/server_test.go @@ -10,9 +10,8 @@ import ( "testing" "time" - "github.com/ipfs/boxo/coreiface/path" "github.com/ipfs/boxo/ipns" - ipfspath "github.com/ipfs/boxo/path" + "github.com/ipfs/boxo/path" "github.com/ipfs/boxo/routing/http/types" "github.com/ipfs/boxo/routing/http/types/iter" "github.com/ipfs/go-cid" @@ -169,7 +168,7 @@ func TestPeers(t *testing.T) { t.Parallel() _, pid := makePeerID(t) - results := iter.FromSlice([]iter.Result[types.Record]{ + results := iter.FromSlice([]iter.Result[*types.PeerRecord]{ {Val: &types.PeerRecord{ Schema: types.SchemaPeer, ID: &pid, @@ -204,7 +203,7 @@ func TestPeers(t *testing.T) { t.Parallel() _, pid := makePeerID(t) - results := iter.FromSlice([]iter.Result[types.Record]{ + results := 
iter.FromSlice([]iter.Result[*types.PeerRecord]{ {Val: &types.PeerRecord{ Schema: types.SchemaPeer, ID: &pid, @@ -242,11 +241,11 @@ func makeName(t *testing.T) (crypto.PrivKey, ipns.Name) { } func makeIPNSRecord(t *testing.T, cid cid.Cid, sk crypto.PrivKey, opts ...ipns.Option) (*ipns.Record, []byte) { - path := path.IpfsPath(cid) + path := path.FromCid(cid) eol := time.Now().Add(time.Hour * 48) ttl := time.Second * 20 - record, err := ipns.NewRecord(sk, ipfspath.FromString(path.String()), 1, eol, ttl, opts...) + record, err := ipns.NewRecord(sk, path, 1, eol, ttl, opts...) require.NoError(t, err) rawRecord, err := ipns.MarshalRecord(record) @@ -375,9 +374,9 @@ func (m *mockContentRouter) ProvideBitswap(ctx context.Context, req *BitswapWrit return args.Get(0).(time.Duration), args.Error(1) } -func (m *mockContentRouter) FindPeers(ctx context.Context, pid peer.ID, limit int) (iter.ResultIter[types.Record], error) { +func (m *mockContentRouter) FindPeers(ctx context.Context, pid peer.ID, limit int) (iter.ResultIter[*types.PeerRecord], error) { args := m.Called(ctx, pid, limit) - return args.Get(0).(iter.ResultIter[types.Record]), args.Error(1) + return args.Get(0).(iter.ResultIter[*types.PeerRecord]), args.Error(1) } func (m *mockContentRouter) GetIPNS(ctx context.Context, name ipns.Name) (*ipns.Record, error) { diff --git a/routing/http/types/json/responses.go b/routing/http/types/json/responses.go index dfcfad830..cc687df48 100644 --- a/routing/http/types/json/responses.go +++ b/routing/http/types/json/responses.go @@ -13,7 +13,7 @@ type ProvidersResponse struct { // PeersResponse is the result of a GET Peers request. type PeersResponse struct { - Peers RecordsArray + Peers []*types.PeerRecord } // RecordsArray is an array of [types.Record] diff --git a/routing/http/types/ndjson/records.go b/routing/http/types/ndjson/records.go index d1a36b411..819cd521d 100644 --- a/routing/http/types/ndjson/records.go +++ b/routing/http/types/ndjson/records.go @@ -44,3 +44,30 @@ func NewRecordsIter(r io.Reader) iter.Iter[iter.Result[types.Record]] { return iter.Map[iter.Result[types.UnknownRecord]](jsonIter, mapFn) } + +// NewPeerRecordsIter returns an iterator that reads [types.PeerRecord] from the given [io.Reader]. +// Records with a different schema are safely ignored. If you want to read all records, use +// [NewRecordsIter] instead. 
+func NewPeerRecordsIter(r io.Reader) iter.Iter[iter.Result[*types.PeerRecord]] { + jsonIter := iter.FromReaderJSON[types.UnknownRecord](r) + mapFn := func(upr iter.Result[types.UnknownRecord]) iter.Result[*types.PeerRecord] { + var result iter.Result[*types.PeerRecord] + if upr.Err != nil { + result.Err = upr.Err + return result + } + switch upr.Val.Schema { + case types.SchemaPeer: + var prov types.PeerRecord + err := json.Unmarshal(upr.Val.Bytes, &prov) + if err != nil { + result.Err = err + return result + } + result.Val = &prov + } + return result + } + + return iter.Map[iter.Result[types.UnknownRecord]](jsonIter, mapFn) +} diff --git a/util/util.go b/util/util.go index ffcab2f33..7a96ae393 100644 --- a/util/util.go +++ b/util/util.go @@ -3,6 +3,7 @@ package util import ( + "crypto/subtle" "errors" "io" "math/rand" @@ -88,24 +89,6 @@ func GetenvBool(name string) bool { return v == "true" || v == "t" || v == "1" } -// MultiErr is a util to return multiple errors -type MultiErr []error - -func (m MultiErr) Error() string { - if len(m) == 0 { - return "no errors" - } - - s := "Multiple errors: " - for i, e := range m { - if i != 0 { - s += ", " - } - s += e.Error() - } - return s -} - // Partition splits a subject 3 parts: prefix, separator, suffix. // The first occurrence of the separator will be matched. // ie. Partition("Ready, steady, go!", ", ") -> ["Ready", ", ", "steady, go!"] @@ -150,9 +133,9 @@ func IsValidHash(s string) bool { // XOR takes two byte slices, XORs them together, returns the resulting slice. func XOR(a, b []byte) []byte { + _ = b[len(a)-1] // keeping same behaviour as previously but this looks like a bug + c := make([]byte, len(a)) - for i := 0; i < len(a); i++ { - c[i] = a[i] ^ b[i] - } + subtle.XORBytes(c, a, b) return c } diff --git a/version.json b/version.json index b947c058b..e0943d02a 100644 --- a/version.json +++ b/version.json @@ -1,3 +1,3 @@ { - "version": "v0.13.1" + "version": "v0.16.0" }
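A minimal usage sketch of the newly exported `routing/http/client.Client` and the typed `FindPeers` results introduced in the patch above. The endpoint URL and the CID are placeholders, error handling is reduced to panics for brevity, and only calls that appear in the diff (`client.New`, `FindProviders`, `FindPeers`, `iter.ReadAll`) are assumed:

```go
package main

import (
	"context"
	"fmt"

	"github.com/ipfs/boxo/routing/http/client"
	"github.com/ipfs/boxo/routing/http/types"
	"github.com/ipfs/boxo/routing/http/types/iter"
	"github.com/ipfs/go-cid"
	"github.com/libp2p/go-libp2p/core/peer"
)

// findPeerAddrs shows the new FindPeers signature: results are typed
// *types.PeerRecord values, so no schema check or type assertion is needed.
func findPeerAddrs(ctx context.Context, c *client.Client, pid peer.ID) ([]types.Multiaddr, error) {
	peersIter, err := c.FindPeers(ctx, pid)
	if err != nil {
		return nil, err
	}
	var addrs []types.Multiaddr
	for _, res := range iter.ReadAll[iter.Result[*types.PeerRecord]](peersIter) {
		if res.Err != nil {
			return nil, res.Err
		}
		addrs = append(addrs, res.Val.Addrs...)
	}
	return addrs, nil
}

func main() {
	// Placeholder endpoint: any server speaking the /routing/v1 HTTP API should work.
	c, err := client.New("https://delegated-ipfs.dev")
	if err != nil {
		panic(err)
	}

	// FindProviders still yields generic types.Record results.
	key, err := cid.Decode("bafkreifjjcie6lypi6ny7amxnfftagclbuxndqonfipmb64f2km2devei4")
	if err != nil {
		panic(err)
	}
	provIter, err := c.FindProviders(context.Background(), key)
	if err != nil {
		panic(err)
	}
	for _, res := range iter.ReadAll[iter.Result[types.Record]](provIter) {
		if res.Err == nil {
			fmt.Println("provider record schema:", res.Val.GetSchema())
		}
	}
}
```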