mirror of
https://github.com/orbitdb/orbitdb.git
synced 2025-07-30 15:23:14 +00:00
Compare commits
157 Commits
(Commit list omitted: 157 commits, ranging from d9e62979d8 to a7ec7b7b41; the author, date and message columns were not preserved.)
4
.github/workflows/api-publish.yml
vendored
@ -25,7 +25,7 @@ jobs:
|
||||
registry-url: https://registry.npmjs.org/
|
||||
- run: npm ci
|
||||
- run: npm run build:docs
|
||||
- uses: actions/upload-pages-artifact@v2
|
||||
- uses: actions/upload-pages-artifact@v3
|
||||
with:
|
||||
path: ./docs/api/
|
||||
|
||||
@ -37,5 +37,5 @@ jobs:
|
||||
steps:
|
||||
- name: Deploy to GitHub Pages
|
||||
id: deployment
|
||||
uses: actions/deploy-pages@v2 # or the latest "vX.X.X" version tag for this action
|
||||
uses: actions/deploy-pages@v4 # or the latest "vX.X.X" version tag for this action
|
||||
|
20
.github/workflows/run-test-node-min-ver.yml
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
---
|
||||
name: Run Tests (Node Minimum Version)
|
||||
|
||||
on: [push, pull_request]
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-node@v3
|
||||
with:
|
||||
node-version: 20
|
||||
registry-url: https://registry.npmjs.org/
|
||||
- name: Install dependencies
|
||||
run: npm ci
|
||||
- name: Run linter
|
||||
run: npm run lint
|
||||
- name: Run tests
|
||||
run: npm run test:ci
|
1
.well-known/funding-manifest-urls
Normal file
@ -0,0 +1 @@
|
||||
https://orbitdb.org/funding.json
|
@ -2,4 +2,10 @@
|
||||
|
||||
For now, please refer to our Git commit history for a list of changes.
|
||||
|
||||
https://github.com/orbitdb/orbitdb/commits/v1.0.2
|
||||
https://github.com/orbitdb/orbitdb/compare/v2.5.0...v3.0.2
|
||||
|
||||
You can also use the following git command to generate a log of changes:
|
||||
|
||||
```
|
||||
git log v2.5.0..v3.0.2 --oneline
|
||||
```
|
||||
|
20
README.md
@ -28,7 +28,7 @@ Install OrbitDB and its dependencies:
|
||||
npm install @orbitdb/core helia
|
||||
```
|
||||
|
||||
### Browser <script> tag
|
||||
### Browser `<script>` tag
|
||||
|
||||
OrbitDB can be loaded in the browser using the distributed js file with a `<script>` tag. OrbitDB is the global namespace and all external functions are available via this namespace:
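For example (a sketch; the bundle path is illustrative, only the global namespace itself is described above):

```js
// After loading the bundle, e.g. <script src="dist/orbitdb.min.js"></script>
// (path is illustrative), the API is reachable through the OrbitDB global:
const { createOrbitDB } = OrbitDB
```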
|
||||
|
||||
@ -45,9 +45,23 @@ If you're using `@orbitdb/core` to develop **browser** or **Node.js** applicatio
|
||||
```javascript
|
||||
import { createHelia } from 'helia'
|
||||
import { createOrbitDB } from '@orbitdb/core'
|
||||
import { gossipsub } from "@chainsafe/libp2p-gossipsub";
|
||||
import { identify } from "@libp2p/identify";
|
||||
import { createLibp2p } from 'libp2p'
|
||||
|
||||
const Libp2pOptions = {
|
||||
services: {
|
||||
pubsub: gossipsub({
|
||||
// necessary to run a single peer
|
||||
allowPublishToZeroTopicPeers: true
|
||||
}),
|
||||
identify: identify()
|
||||
}
|
||||
}
|
||||
|
||||
;(async function () {
|
||||
const ipfs = await createHelia()
|
||||
const libp2p = await createLibp2p({ ...Libp2pOptions })
|
||||
const ipfs = await createHelia({libp2p})
|
||||
const orbitdb = await createOrbitDB({ ipfs })
|
||||
|
||||
// Create / Open a database. Defaults to db type "events".
|
||||
@ -78,6 +92,7 @@ import { createOrbitDB } from '@orbitdb/core'
|
||||
await orbitdb.stop()
|
||||
await ipfs.stop()
|
||||
})()
|
||||
|
||||
```
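For reference, typical usage of the opened events database looks like this (a sketch based on the `add` and `iterator` calls used in the Getting Started guide):

```js
// Add records to the default "events" database.
await db.add('hello world 1')
await db.add('hello world 2')

// Read the records back.
for await (const record of db.iterator()) {
  console.log(record)
}
```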
|
||||
|
||||
To configure your [IPFS instance](https://github.com/ipfs/helia) for persistence and [Libp2p](https://github.com/libp2p/js-libp2p) for connecting to peers, see [Creating a Helia instance](https://github.com/orbitdb/quickstart/blob/main/src/index.js) and the [Default Libp2p Configurations](https://github.com/orbitdb/quickstart/blob/main/src/config/libp2p/index.js) in [@orbitdb/quickstart](https://github.com/orbitdb/quickstart/blob/main/src/config/libp2p/index.js) for examples.
|
||||
@ -94,6 +109,7 @@ You can find more advanced topics in our [docs](https://github.com/orbitdb/orbit
|
||||
- [Connecting Peers](https://github.com/orbitdb/orbitdb/blob/main/docs/CONNECTING_PEERS.md)
|
||||
- [Replication](https://github.com/orbitdb/orbitdb/blob/main/docs/REPLICATION.md)
|
||||
- [Oplog](https://github.com/orbitdb/orbitdb/blob/main/docs/OPLOG.md)
|
||||
- [Encryption](https://github.com/orbitdb/orbitdb/blob/main/docs/ENCRYPTION.md)
|
||||
|
||||
### API
|
||||
|
||||
|
68
benchmarks/orbitdb-documents.js
Normal file
@ -0,0 +1,68 @@
|
||||
import { createOrbitDB } from '../src/index.js'
|
||||
// import { createOrbitDB, MemoryStorage } from '../src/index.js'
|
||||
import { rimraf as rmrf } from 'rimraf'
|
||||
import createHelia from '../test/utils/create-helia.js'
|
||||
|
||||
import { EventEmitter } from 'events'
|
||||
EventEmitter.defaultMaxListeners = 10000
|
||||
|
||||
;(async () => {
|
||||
console.log('Starting benchmark...')
|
||||
|
||||
const entryCount = 1000
|
||||
|
||||
await rmrf('./ipfs')
|
||||
await rmrf('./orbitdb')
|
||||
|
||||
const ipfs = await createHelia()
|
||||
const orbitdb = await createOrbitDB({ ipfs })
|
||||
|
||||
console.log(`Insert ${entryCount} documents`)
|
||||
|
||||
// const entryStorage = await MemoryStorage()
|
||||
// const headsStorage = await MemoryStorage()
|
||||
// const indexStorage = await MemoryStorage()
|
||||
|
||||
// const db1 = await orbitdb.open('benchmark-documents', { type: 'documents', referencesCount: 16, entryStorage, headsStorage, indexStorage })
|
||||
|
||||
const db1 = await orbitdb.open('benchmark-documents', { type: 'documents' })
|
||||
|
||||
const startTime1 = new Date().getTime()
|
||||
|
||||
for (let i = 0; i < entryCount; i++) {
|
||||
const doc = { _id: i.toString(), message: 'hello ' + i }
|
||||
await db1.put(doc)
|
||||
}
|
||||
|
||||
const endTime1 = new Date().getTime()
|
||||
const duration1 = endTime1 - startTime1
|
||||
const operationsPerSecond1 = Math.floor(entryCount / (duration1 / 1000))
|
||||
const millisecondsPerOp1 = duration1 / entryCount
|
||||
console.log(`Inserting ${entryCount} documents took ${duration1} ms, ${operationsPerSecond1} ops/s, ${millisecondsPerOp1} ms/op`)
|
||||
|
||||
console.log(`Query ${entryCount} documents`)
|
||||
const startTime2 = new Date().getTime()
|
||||
|
||||
const all = []
|
||||
for await (const { key, value } of db1.iterator()) {
|
||||
all.unshift({ key, value })
|
||||
}
|
||||
|
||||
const endTime2 = new Date().getTime()
|
||||
const duration2 = endTime2 - startTime2
|
||||
const operationsPerSecond2 = Math.floor(entryCount / (duration2 / 1000))
|
||||
const millisecondsPerOp2 = duration2 / entryCount
|
||||
|
||||
console.log(`Querying ${all.length} documents took ${duration2} ms, ${operationsPerSecond2} ops/s, ${millisecondsPerOp2} ms/op`)
|
||||
|
||||
await db1.drop()
|
||||
await db1.close()
|
||||
|
||||
await orbitdb.stop()
|
||||
await ipfs.stop()
|
||||
|
||||
await rmrf('./ipfs')
|
||||
await rmrf('./orbitdb')
|
||||
|
||||
process.exit(0)
|
||||
})()
|
60
benchmarks/orbitdb-kv-indexed.js
Normal file
@ -0,0 +1,60 @@
|
||||
import { createOrbitDB, KeyValueIndexed } from '../src/index.js'
|
||||
import { rimraf as rmrf } from 'rimraf'
|
||||
import createHelia from '../test/utils/create-helia.js'
|
||||
|
||||
import { EventEmitter } from 'events'
|
||||
EventEmitter.defaultMaxListeners = 10000
|
||||
|
||||
;(async () => {
|
||||
console.log('Starting benchmark...')
|
||||
|
||||
const entryCount = 1000
|
||||
|
||||
await rmrf('./ipfs')
|
||||
await rmrf('./orbitdb')
|
||||
|
||||
const ipfs = await createHelia()
|
||||
const orbitdb = await createOrbitDB({ ipfs })
|
||||
|
||||
console.log(`Set ${entryCount} keys/values`)
|
||||
|
||||
const db1 = await orbitdb.open('benchmark-keyvalue-indexed', { Database: KeyValueIndexed() })
|
||||
|
||||
const startTime1 = new Date().getTime()
|
||||
|
||||
for (let i = 0; i < entryCount; i++) {
|
||||
await db1.set(i.toString(), 'hello' + i)
|
||||
}
|
||||
|
||||
const endTime1 = new Date().getTime()
|
||||
const duration1 = endTime1 - startTime1
|
||||
const operationsPerSecond1 = Math.floor(entryCount / (duration1 / 1000))
|
||||
const millisecondsPerOp1 = duration1 / entryCount
|
||||
console.log(`Setting ${entryCount} key/values took ${duration1} ms, ${operationsPerSecond1} ops/s, ${millisecondsPerOp1} ms/op`)
|
||||
|
||||
console.log(`Iterate ${entryCount} key/values`)
|
||||
const startTime2 = new Date().getTime()
|
||||
|
||||
const all = []
|
||||
for await (const { key, value } of db1.iterator()) {
|
||||
all.unshift({ key, value })
|
||||
}
|
||||
|
||||
const endTime2 = new Date().getTime()
|
||||
const duration2 = endTime2 - startTime2
|
||||
const operationsPerSecond2 = Math.floor(entryCount / (duration2 / 1000))
|
||||
const millisecondsPerOp2 = duration2 / entryCount
|
||||
|
||||
console.log(`Iterating ${all.length} key/values took ${duration2} ms, ${operationsPerSecond2} ops/s, ${millisecondsPerOp2} ms/op`)
|
||||
|
||||
await db1.drop()
|
||||
await db1.close()
|
||||
|
||||
await orbitdb.stop()
|
||||
await ipfs.stop()
|
||||
|
||||
await rmrf('./ipfs')
|
||||
await rmrf('./orbitdb')
|
||||
|
||||
process.exit(0)
|
||||
})()
|
@ -45,8 +45,10 @@ EventEmitter.defaultMaxListeners = 10000
|
||||
let connected = false
|
||||
|
||||
const onJoin = async (peerId) => (connected = true)
|
||||
const onError = async (err) => console.error(err)
|
||||
|
||||
db2.events.on('join', onJoin)
|
||||
db2.events.on('error', onError)
|
||||
|
||||
await waitFor(() => connected, () => true)
|
||||
|
||||
|
@ -2,7 +2,7 @@
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>OrbitDB API - v1.0</title>
|
||||
<title>OrbitDB API - v2.1</title>
|
||||
|
||||
<script src="scripts/prettify/prettify.js"> </script>
|
||||
<script src="scripts/prettify/lang-css.js"> </script>
|
||||
|
@ -38,6 +38,8 @@ export default (env, argv) => {
|
||||
],
|
||||
fallback: {
|
||||
path: require.resolve('path-browserify'),
|
||||
crypto: false,
|
||||
stream: require.resolve('stream-browserify'),
|
||||
process: false
|
||||
}
|
||||
},
|
||||
|
@ -10,11 +10,13 @@ Different access controllers can be assigned to the database using the `AccessCo
|
||||
import { createLibp2p } from 'libp2p'
|
||||
import { createHelia } from 'helia'
|
||||
import { createOrbitDB } from '@orbitdb/core'
|
||||
import * as SomeAccessController from 'some-access-controller.js'
|
||||
import { LevelBlockstore } from 'blockstore-level'
|
||||
import { Libp2pOptions } from './config/libp2p.js'
|
||||
import * as SomeAccessController from 'some-access-controller.js'
|
||||
|
||||
const blockstore = new LevelBlockstore('./ipfs')
|
||||
const libp2p = await createLibp2p(Libp2pOptions)
|
||||
const ipfs = await createHelia({ libp2p })
|
||||
const ipfs = await createHelia({ libp2p, blockstore })
|
||||
|
||||
const orbitdb = await createOrbitDB({ ipfs })
|
||||
|
||||
@ -39,11 +41,13 @@ To change write access, pass the IPFSAccessController with the `write` parameter
|
||||
```js
|
||||
import { createLibp2p } from 'libp2p'
|
||||
import { createHelia } from 'helia'
|
||||
import { LevelBlockstore } from 'blockstore-level'
|
||||
import { createOrbitDB, Identities, IPFSAccessController } from '@orbitdb/core'
|
||||
import { Libp2pOptions } from './config/libp2p.js'
|
||||
|
||||
const blockstore = new LevelBlockstore('./ipfs')
|
||||
const libp2p = await createLibp2p(Libp2pOptions)
|
||||
const ipfs = await createHelia({ libp2p })
|
||||
const ipfs = await createHelia({ libp2p, blockstore })
|
||||
|
||||
const identities = await Identities()
|
||||
const anotherIdentity = identities.createIdentity('userB')
|
||||
@ -60,9 +64,11 @@ To allow anyone to write to the database, specify the wildcard '*':
|
||||
```js
|
||||
import { createLibp2p } from 'libp2p'
|
||||
import { createHelia } from 'helia'
|
||||
import { LevelBlockstore } from 'blockstore-level'
|
||||
import { createOrbitDB, Identities, IPFSAccessController } from '@orbitdb/core'
|
||||
import { Libp2pOptions } from './config/libp2p.js'
|
||||
|
||||
const blockstore = new LevelBlockstore('./ipfs')
|
||||
const libp2p = await createLibp2p(Libp2pOptions)
|
||||
const ipfs = await createHelia({ libp2p })
|
||||
|
||||
@ -80,15 +86,17 @@ The OrbitDB access controller provides configurable write access using grant and
|
||||
```js
|
||||
import { createLibp2p } from 'libp2p'
|
||||
import { createHelia } from 'helia'
|
||||
import { LevelBlockstore } from 'blockstore-level'
|
||||
import { createOrbitDB, Identities, OrbitDBAccessController } from '@orbitdb/core'
|
||||
import { Libp2pOptions } from './config/libp2p.js'
|
||||
|
||||
const blockstore = new LevelBlockstore('./ipfs')
|
||||
const libp2p = await createLibp2p(Libp2pOptions)
|
||||
const ipfs = await createHelia({ libp2p })
|
||||
|
||||
const orbitdb = await createOrbitDB({ ipfs })
|
||||
|
||||
const identities = await Identities()
|
||||
const identities = await Identities({ ipfs })
|
||||
const anotherIdentity = identities.createIdentity('userB')
|
||||
|
||||
const db = await orbitdb.open('my-db', { AccessController: OrbitDBAccessController({ write: [orbitdb.identity.id, anotherIdentity.id] }) })
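
// Write access can later be granted to or revoked from other identities at
// runtime (a sketch of the grant/revoke capabilities provided by
// OrbitDBAccessController):
await db.access.grant('write', anotherIdentity.id)
await db.access.revoke('write', anotherIdentity.id)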
|
||||
@ -162,6 +170,21 @@ const canAppend = async (entry) => {
|
||||
|
||||
In the above example, the `entry.identity` will be the hash of the identity. Using this hash, the entire identity can be retrieved and the identity's id is used to verify write access. `write.includes('*')` is wildcard write and would allow any identity to write to the operations log.
|
||||
|
||||
The Identities getIdentity function will fetch the identity from the Identities storage, requesting it from the remote peer if it is not available locally. To synchronize identities across peers, IPFSStorage must be used. To enable IPFSStorage for Identities, pass either an instance of IPFS or an IPFS-enabled storage to the Identities function.
|
||||
|
||||
To pass IPFS to Identities:
|
||||
|
||||
```
|
||||
const identities = await Identities({ ipfs })
|
||||
```
|
||||
|
||||
To pass IPFS-enabled storage:
|
||||
|
||||
```
|
||||
const storage = await IPFSStorage({ ipfs })
|
||||
const identities = await Identities({ storage })
|
||||
```
|
||||
|
||||
### Using a custom access controller with OrbitDB
|
||||
|
||||
Before passing the custom access controller to the `open` function, it must be added to OrbitDB's AccessControllers:
|
||||
@ -169,13 +192,15 @@ Before passing the custom access controller to the `open` function, it must be a
|
||||
```js
|
||||
import { createLibp2p } from 'libp2p'
|
||||
import { createHelia } from 'helia'
|
||||
import { LevelBlockstore } from 'blockstore-level'
|
||||
import { createOrbitDB, useAccessController } from '@orbitdb/core'
|
||||
import { Libp2pOptions } from './config/libp2p.js'
|
||||
|
||||
const blockstore = new LevelBlockstore('./ipfs')
|
||||
const libp2p = await createLibp2p(Libp2pOptions)
|
||||
const ipfs = await createHelia({ libp2p })
|
||||
const ipfs = await createHelia({ libp2p, blockstore })
|
||||
|
||||
useAccessController(CustomAccessController)
|
||||
const orbitdb = await createOrbitDB({ ipfs })
|
||||
const db = await orbitdb.open('my-db', { AccessController: CustomAccessController(params) })
|
||||
```
|
||||
```
|
||||
|
@ -72,7 +72,7 @@ const options = {
|
||||
filter: filters.all
|
||||
})
|
||||
],
|
||||
connectionEncryption: [noise()],
|
||||
connectionEncrypters: [noise()],
|
||||
streamMuxers: [yamux()],
|
||||
services: {
|
||||
identify: identify(),
|
||||
@ -114,7 +114,7 @@ const options = {
|
||||
discoverRelays: 1
|
||||
})
|
||||
],
|
||||
connectionEncryption: [noise()],
|
||||
connectionEncrypters: [noise()],
|
||||
streamMuxers: [yamux()],
|
||||
connectionGater: {
|
||||
denyDialMultiaddr: () => {
|
||||
@ -180,7 +180,7 @@ const options = {
|
||||
discoverRelays: 1
|
||||
})
|
||||
],
|
||||
connectionEncryption: [noise()],
|
||||
connectionEncrypters: [noise()],
|
||||
streamMuxers: [yamux()],
|
||||
connectionGater: {
|
||||
denyDialMultiaddr: () => {
|
||||
@ -247,7 +247,7 @@ const options = {
|
||||
discoverRelays: 1
|
||||
})
|
||||
],
|
||||
connectionEncryption: [noise()],
|
||||
connectionEncrypters: [noise()],
|
||||
streamMuxers: [yamux()],
|
||||
connectionGater: {
|
||||
denyDialMultiaddr: () => {
|
||||
|
@ -208,7 +208,7 @@ for await (const record of db2.iterator()) {
|
||||
}
|
||||
```
|
||||
|
||||
To learn more, check out [OrbitDB's synchronization protocol](https://orbitdb.org/api/module-Sync.html) and the [OrbitDB replication documentation](./REPLICATION.md).
|
||||
To learn more, check out [OrbitDB's synchronization protocol](https://api.orbitdb.org/module-Sync.html) and the [OrbitDB replication documentation](./REPLICATION.md).
|
||||
|
||||
## Custom databases
|
||||
|
||||
@ -264,7 +264,7 @@ CustomDB.type = type
|
||||
export default CustomDB
|
||||
```
|
||||
|
||||
[Documents](../src/db/documents.js), [Events](../src/db/events.js) and [KeyValue](../src/db/keyvalue.js) provide good examples of how a database is implemented in OrbitDB and how to add the logic for returning records from the database (the state of the database).
|
||||
[Documents](../src/databases/documents.js), [Events](../src/databases/events.js) and [KeyValue](../src/databases/keyvalue.js) provide good examples of how a database is implemented in OrbitDB and how to add the logic for returning records from the database (the state of the database).
|
||||
|
||||
To use a custom database, add it to the list of supported database types:
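A sketch of that registration, assuming the `useDatabaseType` helper exported by `@orbitdb/core` (the database-type counterpart of the `useAccessController` helper used in the access controller docs):

```js
import { createOrbitDB, useDatabaseType } from '@orbitdb/core'
import CustomDB from './custom-db.js'

// Register the custom database type before opening databases that use it.
useDatabaseType(CustomDB)

const orbitdb = await createOrbitDB({ ipfs })
const db = await orbitdb.open('my-custom-db', { type: CustomDB.type })
```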
|
||||
|
||||
|
94
docs/ENCRYPTION.md
Normal file
@ -0,0 +1,94 @@
|
||||
# Encryption
|
||||
|
||||
OrbitDB features a modular architecture for database encryption. By passing a module to an OrbitDB database, different encryption methods can be employed.
|
||||
|
||||
The OrbitDB project currently maintains a [SimpleEncryption](https://github.com/orbitdb/simple-encryption) module that can be used to get started with encrypted databases.
|
||||
|
||||
**WARNING:** SimpleEncryption is an unaudited encryption module. Use at your own risk.
|
||||
|
||||
## How it works
|
||||
|
||||
OrbitDB encrypts databases in two layers: encrypting the payload and encrypting the log entry.
|
||||
|
||||
Log entry encryption encrypts each log entry fully. Payload encryption encrypts just the value.
|
||||
|
||||
This makes it possible to separate the users of a database from its replicators, i.e. an OrbitDB peer can replicate a database without being able to decrypt the payloads (the data) of the database.
|
||||
|
||||
## Configuring encryption
|
||||
|
||||
You can configure OrbitDB to encrypt either the payload data being stored or the entire database.
|
||||
|
||||
To ***encrypt payload data only***, specify an encryption module and pass it to OrbitDB via the encryption object using the `data` property:
|
||||
|
||||
```
|
||||
const data = await EncryptionModule()
|
||||
const encryption = { data }
|
||||
const db = await orbitdb.open(dbNameOrAddress, { encryption })
|
||||
```
|
||||
|
||||
To ***encrypt the database log entries***, specify an encryption module and pass it to OrbitDB via the encryption object using the `replication` property:
|
||||
|
||||
```
|
||||
const replication = await EncryptionModule()
|
||||
const encryption = { replication }
|
||||
const db = await orbitdb.open(dbNameOrAddress, { encryption })
|
||||
```
|
||||
|
||||
To ***encrypt the whole database***, with payload data and oplog entries encrypted separately, specify an encryption module and pass it to OrbitDB via the encryption object using both the `replication` and `data` properties:
|
||||
|
||||
```
|
||||
const replication = await EncryptionModule()
|
||||
const data = await EncryptionModule()
|
||||
const encryption = { replication, data }
|
||||
const db = await orbitdb.open(dbNameOrAddress, { encryption })
|
||||
```
|
||||
|
||||
## Encrypting Databases
|
||||
|
||||
OrbitDB provides simple password-based encryption via an external module called [SimpleEncryption](https://github.com/orbitdb/simple-encryption).
|
||||
|
||||
**WARNING:** This is an unaudited encryption module. Use at your own risk.
|
||||
|
||||
To install SimpleEncryption:
|
||||
|
||||
```
|
||||
npm i @orbitdb/simple-encryption
|
||||
```
|
||||
|
||||
To use encryption, instantiate SimpleEncryption with a password and pass it to OrbitDB when opening your database:
|
||||
|
||||
```js
|
||||
import { SimpleEncryption } from '@orbitdb/simple-encryption'
|
||||
|
||||
const replication = await SimpleEncryption({ password: 'hello' })
|
||||
const data = await SimpleEncryption({ password: 'world' })
|
||||
|
||||
const encryption = { data, replication }
|
||||
|
||||
const db = await orbitdb.open(dbNameOrAddress, { encryption })
|
||||
```
|
||||
|
||||
If you wish to use another encryption type, simply replace SimpleEncryption with the module of your choice.
|
||||
|
||||
## Custom Encryption
|
||||
|
||||
To implement a custom encryption module for OrbitDB, expose encrypt and decrypt functions:
|
||||
|
||||
```
|
||||
const CustomEncryption = async () => {
|
||||
const encrypt = (value) => {
|
||||
// return encrypted value
|
||||
}
|
||||
|
||||
const decrypt = (value) => {
|
||||
// return decrypted value
|
||||
}
|
||||
|
||||
return {
|
||||
encrypt,
|
||||
decrypt
|
||||
}
|
||||
}
|
||||
|
||||
export default CustomEncryption
|
||||
```
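As a concrete illustration of the skeleton above, here is a hedged sketch of a password-based module built on Node's `crypto` API. It assumes OrbitDB passes `encrypt` and `decrypt` raw `Uint8Array` values, as in the skeleton, and it is just as unaudited as SimpleEncryption; the fixed salt in particular is for illustration only:

```js
import { randomBytes, createCipheriv, createDecipheriv, scryptSync } from 'node:crypto'

const PasswordEncryption = async ({ password }) => {
  // Derive a 256-bit key from the password (fixed salt for illustration only).
  const key = scryptSync(password, 'orbitdb-example-salt', 32)

  const encrypt = (value) => {
    const iv = randomBytes(12)
    const cipher = createCipheriv('aes-256-gcm', key, iv)
    const ciphertext = Buffer.concat([cipher.update(value), cipher.final()])
    // Prepend the IV and auth tag so decrypt() can recover them.
    return Uint8Array.from(Buffer.concat([iv, cipher.getAuthTag(), ciphertext]))
  }

  const decrypt = (value) => {
    const buf = Buffer.from(value)
    const iv = buf.subarray(0, 12)
    const tag = buf.subarray(12, 28)
    const ciphertext = buf.subarray(28)
    const decipher = createDecipheriv('aes-256-gcm', key, iv)
    decipher.setAuthTag(tag)
    return Uint8Array.from(Buffer.concat([decipher.update(ciphertext), decipher.final()]))
  }

  return { encrypt, decrypt }
}

export default PasswordEncryption
```

Such a module can then be passed to `orbitdb.open` through the `encryption` object in the same way as SimpleEncryption above.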
|
@ -4,21 +4,15 @@ This guide will help you get up and running with a simple OrbitDB database that
|
||||
|
||||
## install
|
||||
|
||||
Install OrbitDB:
|
||||
Install OrbitDB and Helia:
|
||||
|
||||
```sh
|
||||
npm i @orbitdb/core
|
||||
```
|
||||
|
||||
You will also need Helia for replication:
|
||||
|
||||
```sh
|
||||
npm i helia
|
||||
npm i helia @orbitdb/core
|
||||
```
|
||||
|
||||
## Prerequisites: Helia and Libp2p
|
||||
|
||||
OrbitDB uses Helia for block storage and Libp2p for database synchronization. However, you need to configure Helia and pass it to OrbitDB when creating a peer.
|
||||
OrbitDB uses Helia for block storage and Libp2p for database synchronization. You will need to configure Helia and pass it to OrbitDB when creating a peer.
|
||||
|
||||
### Block Storage
|
||||
|
||||
@ -33,7 +27,7 @@ then instantiate and pass to Helia:
|
||||
```
|
||||
import { LevelBlockstore } from 'blockstore-level'
|
||||
|
||||
const blockstore = new LevelBlockstore('./ipfs')
|
||||
const blockstore = new LevelBlockstore('./ipfs/blocks')
|
||||
const ipfs = createHelia({ blockstore })
|
||||
```
|
||||
|
||||
@ -45,58 +39,53 @@ An instance of Libp2p is required by Helia which is then used by OrbitDB to sync
|
||||
|
||||
A simple Node.js example might look something like:
|
||||
|
||||
```json
|
||||
```js
|
||||
{
|
||||
peerDiscovery: [
|
||||
mdns()
|
||||
],
|
||||
addresses: {
|
||||
listen: ['/ip4/0.0.0.0/tcp/0/ws']
|
||||
listen: ['/ip4/0.0.0.0/tcp/0']
|
||||
},
|
||||
transports: [
|
||||
webSockets({
|
||||
filter: all
|
||||
}),
|
||||
webRTC(),
|
||||
circuitRelayTransport({
|
||||
discoverRelays: 1
|
||||
})
|
||||
tcp()
|
||||
],
|
||||
connectionEncryption: [noise()],
|
||||
connectionEncrypters: [noise()],
|
||||
streamMuxers: [yamux()],
|
||||
connectionGater: {
|
||||
denyDialMultiaddr: () => false
|
||||
},
|
||||
services: {
|
||||
identify: identify(),
|
||||
pubsub: gossipsub({ allowPublishToZeroPeers: true })
|
||||
pubsub: gossipsub({ allowPublishToZeroTopicPeers: true })
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
You can export the above configuration from a file:
|
||||
You can export the above configuration from an ES module:
|
||||
|
||||
```js
|
||||
import { tcp } from '@libp2p/tcp'
|
||||
import { identify } from '@libp2p/identify'
|
||||
import { gossipsub } from '@chainsafe/libp2p-gossipsub'
|
||||
import { noise } from '@chainsafe/libp2p-noise'
|
||||
import { yamux } from '@chainsafe/libp2p-yamux'
|
||||
import { mdns } from '@libp2p/mdns'
|
||||
|
||||
export const Libp2pOptions = {
|
||||
peerDiscovery: [
|
||||
mdns()
|
||||
],
|
||||
addresses: {
|
||||
listen: ['/ip4/0.0.0.0/tcp/0/ws']
|
||||
listen: ['/ip4/0.0.0.0/tcp/0']
|
||||
},
|
||||
transports: [
|
||||
webSockets({
|
||||
filter: all
|
||||
}),
|
||||
webRTC(),
|
||||
circuitRelayTransport({
|
||||
discoverRelays: 1
|
||||
})
|
||||
tcp()
|
||||
],
|
||||
connectionEncryption: [noise()],
|
||||
connectionEncrypters: [noise()],
|
||||
streamMuxers: [yamux()],
|
||||
connectionGater: {
|
||||
denyDialMultiaddr: () => false
|
||||
},
|
||||
services: {
|
||||
identify: identify(),
|
||||
pubsub: gossipsub({ allowPublishToZeroPeers: true })
|
||||
pubsub: gossipsub({ allowPublishToZeroTopicPeers: true })
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Throughout this documentation, you will see the above Libp2p configuration imported from a file called **./config/libp2p.js**, for example:
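The import below reproduces the statement used in the index.js example later in this guide:

```js
import { Libp2pOptions } from './config/libp2p.js'
```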
|
||||
@ -115,6 +104,7 @@ Assuming you have a Node.js development environment installed, create a new proj
|
||||
mkdir orbitdb-app
|
||||
cd orbitdb-app
|
||||
npm init
|
||||
npm i helia @orbitdb/core blockstore-level @chainsafe/libp2p-gossipsub
|
||||
```
|
||||
|
||||
Create a file in your project called index.js and add the following code to it:
|
||||
@ -123,11 +113,13 @@ Create a file in your project called index.js and add the following code to it:
|
||||
import { createLibp2p } from 'libp2p'
|
||||
import { createHelia } from 'helia'
|
||||
import { createOrbitDB } from '@orbitdb/core'
|
||||
import { LevelBlockstore } from 'blockstore-level'
|
||||
import { Libp2pOptions } from './config/libp2p.js'
|
||||
|
||||
// Create an IPFS instance.
|
||||
const blockstore = new LevelBlockstore('./ipfs/blocks')
|
||||
const libp2p = await createLibp2p(Libp2pOptions)
|
||||
const ipfs = await createHelia({ libp2p })
|
||||
const ipfs = await createHelia({ libp2p, blockstore })
|
||||
|
||||
const orbitdb = await createOrbitDB({ ipfs })
|
||||
|
||||
@ -154,21 +146,20 @@ Run index.js to create your new OrbitDB database:
|
||||
node index.js
|
||||
```
|
||||
|
||||
You should see the address of your new database and the records you have added
|
||||
to it.
|
||||
You should see the address of your new database and the records you have added to it.
|
||||
|
||||
Without a type, OrbitDB defaults to a database type of 'events'. To change the database type, pass the `type` parameter with a valid database type.
|
||||
|
||||
Update:
|
||||
Change:
|
||||
|
||||
```js
|
||||
const db = await orbitdb.open('my-db')
|
||||
```
|
||||
|
||||
to read:
|
||||
to:
|
||||
|
||||
```js
|
||||
const db = await orbitdb.open('my-documents-db', { 'documents '})
|
||||
const db = await orbitdb.open('my-documents-db', { type: 'documents' })
|
||||
```
|
||||
|
||||
Also replace:
|
||||
@ -181,8 +172,8 @@ await db.add('hello world 2')
|
||||
with:
|
||||
|
||||
```js
|
||||
await db.put('doc1', { hello: "world 1", hits: 5 })
|
||||
await db.put('doc2', { hello: "world 2", hits: 2 })
|
||||
await db.put({ _id: "doc1", hello: "world 1", hits: 5 })
|
||||
await db.put({ _id: "doc2", hello: "world 2", hits: 2 })
|
||||
```
|
||||
|
||||
Run index.js again:
|
||||
@ -204,6 +195,7 @@ To create an OrbitDB database peer, create a new project called `orbitdb-peer`:
|
||||
mkdir orbitdb-peer
|
||||
cd orbitdb-peer
|
||||
npm init
|
||||
npm i helia @orbitdb/core blockstore-level @chainsafe/libp2p-gossipsub
|
||||
```
|
||||
|
||||
Create a new file called index.js and paste in the following code:
|
||||
@ -211,50 +203,59 @@ Create a new file called index.js and paste in the following code:
|
||||
```js
|
||||
import { createLibp2p } from 'libp2p'
|
||||
import { createHelia } from 'helia'
|
||||
import { OrbitDB, IPFSAccessController } from '@orbitdb/core'
|
||||
import { createOrbitDB, IPFSAccessController } from '@orbitdb/core'
|
||||
import { LevelBlockstore } from 'blockstore-level'
|
||||
import { Libp2pOptions } from './config/libp2p.js'
|
||||
import { multiaddr } from '@multiformats/multiaddr'
|
||||
|
||||
const main = async () => {
|
||||
const libp2p = await createLibp2p(Libp2pOptions)
|
||||
const ipfs = await createHelia({ libp2p })
|
||||
|
||||
const main = async () => {
|
||||
// create a random directory to avoid OrbitDB conflicts.
|
||||
let randDir = (Math.random() + 1).toString(36).substring(2)
|
||||
|
||||
const blockstore = new LevelBlockstore(`./${randDir}/ipfs/blocks`)
|
||||
const libp2p = await createLibp2p(Libp2pOptions)
|
||||
const ipfs = await createHelia({ libp2p, blockstore })
|
||||
|
||||
const orbitdb = await createOrbitDB({ ipfs, directory: `./${randDir}/orbitdb` })
|
||||
|
||||
let db
|
||||
|
||||
if (process.argv[2]) {
|
||||
if (process.argv[2] && process.argv[3]) {
|
||||
await orbitdb.ipfs.libp2p.dial(multiaddr(process.argv[3]))
|
||||
console.log('opening db', process.argv[2])
|
||||
db = await orbitdb.open(process.argv[2])
|
||||
} else {
|
||||
// When we open a new database, write access is only available to the
|
||||
// db creator. When replicating a database on a remote peer, the remote
|
||||
// peer must also have write access. Here, we are simply allowing anyone
|
||||
// to write to the database. A more robust solution would use the
|
||||
// OrbitDBAccessController to provide "fine-grain" access using grant and
|
||||
// revoke.
|
||||
db = await orbitdb.open('my-db', { AccessController: IPFSAccessController({ write: ['*']})})
|
||||
// db creator. If we want to allow other peers to write to the database,
|
||||
// they must be specified in IPFSAccessController write array param. Here,
|
||||
// we simply allow anyone to write to the database. A more robust solution
|
||||
// would use the OrbitDBAccessController to provide mutable, "fine-grain"
|
||||
// access using grant and revoke.
|
||||
db = await orbitdb.open('my-db', { AccessController: IPFSAccessController({ write: ['*']}) })
|
||||
|
||||
console.log('libp2p address', '(copy one of these addresses then dial into this node from the second node)', orbitdb.ipfs.libp2p.getMultiaddrs())
|
||||
|
||||
// Copy this output if you want to connect a peer to another.
|
||||
console.log('my-db address', '(copy my db address and use when launching peer 2)', db.address)
|
||||
}
|
||||
|
||||
// Copy this output if you want to connect a peer to another.
|
||||
console.log('my-db address', db.address)
|
||||
|
||||
// Add some records to the db when another peers joins.
|
||||
db.events.on('join', async (peerId, heads) => {
|
||||
await db.add('hello world 1')
|
||||
await db.add('hello world 2')
|
||||
})
|
||||
|
||||
db.events.on('update', async (entry) => {
|
||||
console.log('entry', entry)
|
||||
|
||||
// To complete full replication, fetch all the records from the other peer.
|
||||
await db.all()
|
||||
// what has been updated.
|
||||
console.log('update', entry.payload.value)
|
||||
})
|
||||
|
||||
|
||||
if (process.argv[2]) {
|
||||
await db.add('hello from second peer')
|
||||
await db.add('hello again from second peer')
|
||||
} else {
|
||||
// write some records
|
||||
await db.add('hello from first peer')
|
||||
await db.add('hello again from first peer')
|
||||
}
|
||||
// Clean up when stopping this app using ctrl+c
|
||||
process.on('SIGINT', async () => {
|
||||
// print the final state of the db.
|
||||
console.log((await db.all()).map(e => e.value))
|
||||
// Close your db and stop OrbitDB and IPFS.
|
||||
await db.close()
|
||||
await orbitdb.stop()
|
||||
@ -267,30 +268,44 @@ const main = async () => {
|
||||
main()
|
||||
```
|
||||
|
||||
Open two consoles in your command line terminal.
|
||||
Launch peer 1 from the terminal:
|
||||
|
||||
In terminal 1, run the first peer:
|
||||
|
||||
```sh
|
||||
node index.js
|
||||
```bash
|
||||
node index.js
|
||||
```
|
||||
|
||||
When running, you should see the address of the database, for example:
|
||||
Once launched, you will see some output which may look something like this:
|
||||
|
||||
```sh
|
||||
my-db address /orbitdb/zdpuB2aYUCnZ7YUBrDkCWpRLQ8ieUbqJEVRZEd5aDhJBDpBqj
|
||||
```
|
||||
libp2p address (copy one of these addresses then dial into this node from the second node) [
|
||||
Multiaddr(/ip4/127.0.0.1/tcp/36161/p2p/12D3KooWKFWB78Hka2uPVNYYoXfucWp6rDLsQzr5CFiP67NAo7YF),
|
||||
Multiaddr(/ip4/192.168.1.22/tcp/36161/p2p/12D3KooWKFWB78Hka2uPVNYYoXfucWp6rDLsQzr5CFiP67NAo7YF),
|
||||
Multiaddr(/ip4/100.64.100.6/tcp/36161/p2p/12D3KooWKFWB78Hka2uPVNYYoXfucWp6rDLsQzr5CFiP67NAo7YF)
|
||||
]
|
||||
my-db address (copy my db address and use when launching peer 2) /orbitdb/zdpuB2aYUCnZ7YUBrDkCWpRLQ8ieUbqJEVRZEd5aDhJBDpBqj
|
||||
```
|
||||
|
||||
Copy the database's address from terminal 1 and, in terminal 2, run:
|
||||
It contains the libp2p address and db address. You will need both of these when connecting from peer 2.
|
||||
|
||||
```sh
|
||||
node index.js /orbitdb/zdpuB2aYUCnZ7YUBrDkCWpRLQ8ieUbqJEVRZEd5aDhJBDpBqj
|
||||
Open another terminal and launch peer 2. The command takes the form `node index.js <orbitdb-address> <libp2p-address>`
|
||||
|
||||
```bash
|
||||
node index.js /orbitdb/zdpuB2aYUCnZ7YUBrDkCWpRLQ8ieUbqJEVRZEd5aDhJBDpBqj /ip4/127.0.0.1/tcp/36161/p2p/12D3KooWKFWB78Hka2uPVNYYoXfucWp6rDLsQzr5CFiP67NAo7YF
|
||||
```
|
||||
|
||||
Upon connection, you should see the records being created in terminal 1's database received by the database running in terminal 2.
|
||||
The second peer dials the first peer on the /ip4/ address and then opens the database.
|
||||
|
||||
|
||||
**PLEASE NOTE:**
|
||||
|
||||
This example uses mDNS to find peers on a local network. It will not work if the peers are on different networks; you would need to implement an alternative peer discovery mechanism to achieve connectivity. Alternatively, if the address of one of the peers is known and accessible, the other peer can dial it manually, as shown below.
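For example, reusing the dial call from the peer 2 code above (the multiaddr shown is the example address printed by peer 1 earlier; substitute the address of your own reachable peer):

```js
import { multiaddr } from '@multiformats/multiaddr'

await orbitdb.ipfs.libp2p.dial(multiaddr('/ip4/192.168.1.22/tcp/36161/p2p/12D3KooWKFWB78Hka2uPVNYYoXfucWp6rDLsQzr5CFiP67NAo7YF'))
```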
|
||||
|
||||
These kinds of connectivity configurations are beyond the scope of OrbitDB. To find out more about connectivity in Libp2p, check out https://connectivity.libp2p.io/.
|
||||
|
||||
## Further Reading
|
||||
|
||||
[Databases](./DATABASES.md) covers database management and data entry in more detail.
|
||||
|
||||
[Replication](./REPLICATION.md) provides a comprehensive overview of how to perform data replication across multiple peers.
|
||||
[Replication](./REPLICATION.md) provides a comprehensive overview of how to perform data replication across multiple peers.
|
||||
|
||||
[Encryption](./ENCRYPTION.md) discusses database encryption using OrbitDB's modular architecture.
|
||||
|
@ -89,7 +89,8 @@ The identity object is stored like any other [IPLD data structure](https://ipld.
|
||||
|
||||
```js
|
||||
import { createLibp2p } from 'libp2p'
|
||||
import { createHelia } from 'helia' from 'ipfs-core'
|
||||
import { createHelia } from 'helia'
|
||||
import { LevelBlockstore } from 'blockstore-level'
|
||||
import * as Block from 'multiformats/block'
|
||||
import { Identities } from '@orbitdb/core'
|
||||
import * as dagCbor from '@ipld/dag-cbor'
|
||||
@ -98,8 +99,9 @@ import { base58btc } from 'multiformats/bases/base58'
|
||||
import { CID } from 'multiformats/cid'
|
||||
import { Libp2pOptions } from './config/libp2p.js'
|
||||
|
||||
const blockstore = new LevelBlockstore('./ipfs')
|
||||
const libp2p = await createLibp2p(Libp2pOptions)
|
||||
const ipfs = await createHelia({ libp2p })
|
||||
const ipfs = await createHelia({ libp2p, blockstore })
|
||||
|
||||
const identities = await Identities({ ipfs })
|
||||
const identity = await identities.createIdentity({ id: 'me' })
|
||||
@ -107,7 +109,7 @@ const identity = await identities.createIdentity({ id: 'me' })
|
||||
const cid = CID.parse(identity.hash, base58btc)
|
||||
|
||||
// Extract the hash from the full db path.
|
||||
const bytes = await ipfs.block.get(cid)
|
||||
const bytes = await ipfs.blockstore.get(cid)
|
||||
|
||||
// Defines how we serialize/hash the data.
|
||||
const codec = dagCbor
|
||||
|
@ -5,14 +5,16 @@ Below is a simple replication example. Both peers run within the same Nodejs pro
|
||||
```js
|
||||
import { createLibp2p } from 'libp2p'
|
||||
import { createHelia } from 'helia'
|
||||
import { LevelBlockstore } from 'blockstore-level'
|
||||
import { createOrbitDB } from '@orbitdb/core'
|
||||
import { Libp2pOptions } from './config/libp2p.js'
|
||||
|
||||
// Our ipfs instances will be connecting over websockets. However, you could achieve the same here using tcp. You can find out more about peer connectivity at https://connectivity.libp2p.io/.
|
||||
// Our ipfs instances will be connecting over tcp. You can find out more about peer connectivity at https://connectivity.libp2p.io/.
|
||||
|
||||
const initIPFSInstance = async () => {
|
||||
const blockstore = new LevelBlockstore('./ipfs')
|
||||
const libp2p = await createLibp2p(Libp2pOptions)
|
||||
return createHelia({ libp2p })
|
||||
return createHelia({ libp2p, blockstore })
|
||||
}
|
||||
|
||||
const ipfs1 = await initIPFSInstance()
|
||||
@ -36,9 +38,7 @@ const db1 = await orbitdb1.open('my-db')
|
||||
// synchronization of the heads between db1 and db2.
|
||||
const db2 = await orbitdb2.open(db1.address)
|
||||
|
||||
// We write some data to db1. This will not be replicated on db2 until we
|
||||
// explicitly request these records using db2's iterator or all() convenience
|
||||
// function.
|
||||
// We write some data to db1. This will automatically be replicated to db2
|
||||
await db1.add('hello world 1')
|
||||
await db1.add('hello world 2')
|
||||
await db1.add('hello world 3')
|
||||
@ -80,4 +80,4 @@ await orbitdb2.stop()
|
||||
await ipfs2.stop()
|
||||
```
|
||||
|
||||
Refer to the API for more information about [OrbitDB's synchronization protocol](https://orbitdb.org/api/module-Sync.html).
|
||||
Refer to the API for more information about [OrbitDB's synchronization protocol](https://api.orbitdb.org/module-Sync.html).
|
||||
|
@ -1,4 +1,4 @@
|
||||
## OrbitDB API - v2.0
|
||||
## OrbitDB API - v3.0
|
||||
|
||||
OrbitDB is a serverless, distributed, peer-to-peer database. OrbitDB uses IPFS
|
||||
as its data storage and Libp2p Pubsub to automatically sync databases with peers. It's an eventually consistent database that uses Merkle-CRDTs for conflict-free database writes and merges, making OrbitDB an excellent choice for p2p and decentralized apps, blockchain applications and local-first web applications.
|
||||
@ -9,19 +9,21 @@ To install OrbitDB:
|
||||
npm install @orbitdb/core
|
||||
```
|
||||
|
||||
IPFS is also required:
|
||||
Helia, the JavaScript implementation of IPFS, is also required:
|
||||
|
||||
```bash
|
||||
npm install ipfs-core
|
||||
npm install helia
|
||||
```
|
||||
|
||||
Instantiate OrbitDB and create a database:
|
||||
|
||||
```js
|
||||
import { create } from 'ipfs-core'
|
||||
import { createLibp2p } from 'libp2p'
|
||||
import { createHelia } from 'helia'
|
||||
import { createOrbitDB } from '@orbitdb/core'
|
||||
|
||||
const ipfs = await create() // IPFS is required for storage and network communication
|
||||
const libp2p = await createLibp2p({ /* Libp2p options */ })
|
||||
const ipfs = await createHelia({ libp2p }) // Helia is required for storage and network communication
|
||||
const orbitdb = await createOrbitDB({ ipfs })
|
||||
const mydb = await orbitdb.open('mydb')
|
||||
console.log(mydb.address) // /orbitdb/zdpuAuK3BHpS7NvMBivynypqciYCuy2UW77XYBPUYRnLjnw13
|
||||
@ -32,10 +34,12 @@ Open and replicate an existing database:
|
||||
|
||||
```js
|
||||
// In another process
|
||||
import { create } from 'ipfs-core'
|
||||
import { createLibp2p } from 'libp2p'
|
||||
import { createHelia } from 'helia'
|
||||
import { createOrbitDB } from '@orbitdb/core'
|
||||
|
||||
const ipfs = await create()
|
||||
const libp2p = await createLibp2p({ /* Libp2p options */ })
|
||||
const ipfs = await createHelia({ libp2p }) // Helia is required for storage and network
|
||||
const orbitdb = await createOrbitDB({ ipfs })
|
||||
const theirdb = await orbitdb.open('/orbitdb/zdpuAuK3BHpS7NvMBivynypqciYCuy2UW77XYBPUYRnLjnw13')
|
||||
for await (let record of theirdb.iterator()) {
|
||||
|
11522
package-lock.json
generated
File diff suppressed because it is too large
16
package.json
@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@orbitdb/core",
|
||||
"version": "2.0.0",
|
||||
"version": "3.0.2",
|
||||
"description": "Distributed p2p database on IPFS",
|
||||
"author": "Haad",
|
||||
"license": "MIT",
|
||||
@ -19,7 +19,7 @@
|
||||
"main": "src/index.js",
|
||||
"dependencies": {
|
||||
"@ipld/dag-cbor": "^9.0.6",
|
||||
"@libp2p/crypto": "^3.0.2",
|
||||
"@libp2p/crypto": "^5.0.5",
|
||||
"it-pipe": "^3.0.1",
|
||||
"level": "^8.0.0",
|
||||
"lru": "^3.1.0",
|
||||
@ -29,19 +29,19 @@
|
||||
"uint8arrays": "^5.0.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@chainsafe/libp2p-yamux": "^6.0.1",
|
||||
"@helia/block-brokers": "^1.0.0",
|
||||
"@libp2p/circuit-relay-v2": "^1.0.10",
|
||||
"blockstore-level": "^1.1.7",
|
||||
"@chainsafe/libp2p-gossipsub": "^14.1.0",
|
||||
"@libp2p/circuit-relay-v2": "^3.1.0",
|
||||
"@orbitdb/simple-encryption": "^0.0.2",
|
||||
"blockstore-level": "^2.0.1",
|
||||
"c8": "^8.0.1",
|
||||
"cross-env": "^7.0.3",
|
||||
"fs-extra": "^11.2.0",
|
||||
"helia": "^3.0.1",
|
||||
"helia": "^5.3.0",
|
||||
"it-all": "^3.0.4",
|
||||
"jsdoc": "^4.0.2",
|
||||
"mocha": "^10.2.0",
|
||||
"path-browserify": "^1.0.1",
|
||||
"playwright-test": "^14.0.0",
|
||||
"playwright-test": "^14.1.9",
|
||||
"rimraf": "^5.0.5",
|
||||
"standard": "^17.1.0",
|
||||
"webpack": "^5.89.0",
|
||||
|
@ -84,7 +84,7 @@ const IPFSAccessController = ({ write, storage } = {}) => async ({ orbitdb, iden
|
||||
// Allow if the write access list contain the writer's id or is '*'
|
||||
if (write.includes(id) || write.includes('*')) {
|
||||
// Check that the identity is valid
|
||||
return identities.verifyIdentity(writerIdentity)
|
||||
return await identities.verifyIdentity(writerIdentity)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
@ -58,7 +58,7 @@ const OrbitDBAccessController = ({ write } = {}) => async ({ orbitdb, identities
|
||||
// If the ACL contains the writer's public key or it contains '*'
|
||||
const hasWriteAccess = await hasCapability('write', id) || await hasCapability('admin', id)
|
||||
if (hasWriteAccess) {
|
||||
return identities.verifyIdentity(writerIdentity)
|
||||
return await identities.verifyIdentity(writerIdentity)
|
||||
}
|
||||
|
||||
return false
|
||||
|
@ -7,7 +7,7 @@
|
||||
import { EventEmitter } from 'events'
|
||||
import PQueue from 'p-queue'
|
||||
import Sync from './sync.js'
|
||||
import { Log, Entry } from './oplog/index.js'
|
||||
import { Log } from './oplog/index.js'
|
||||
import { ComposedStorage, LRUStorage, IPFSBlockStorage, LevelStorage } from './storage/index.js'
|
||||
import pathJoin from './utils/path-join.js'
|
||||
|
||||
@ -39,10 +39,12 @@ const defaultCacheSize = 1000
|
||||
* automatically. Otherwise, false.
|
||||
* @param {function} [params.onUpdate] A function callback. Fired when an
|
||||
* entry is added to the oplog.
|
||||
* @param {Function} options.encryptFn An encryption function.
|
||||
* @param {Function} options.decryptFn A decryption function.
|
||||
* @return {module:Databases~Database} An instance of Database.
|
||||
* @instance
|
||||
*/
|
||||
const Database = async ({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate }) => {
|
||||
const Database = async ({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate, encryption }) => {
|
||||
/**
|
||||
* @namespace module:Databases~Database
|
||||
* @description The instance returned by {@link module:Database~Database}.
|
||||
@ -108,7 +110,9 @@ const Database = async ({ ipfs, identity, address, name, access, directory, meta
|
||||
await LevelStorage({ path: pathJoin(directory, '/log/_index/') })
|
||||
)
|
||||
|
||||
const log = await Log(identity, { logId: address, access, entryStorage, headsStorage, indexStorage })
|
||||
encryption = encryption || {}
|
||||
|
||||
const log = await Log(identity, { logId: address, access, entryStorage, headsStorage, indexStorage, encryption })
|
||||
|
||||
const events = new EventEmitter()
|
||||
|
||||
@ -134,21 +138,23 @@ const Database = async ({ ipfs, identity, address, name, access, directory, meta
|
||||
return entry.hash
|
||||
}
|
||||
const hash = await queue.add(task)
|
||||
await queue.onIdle()
|
||||
return hash
|
||||
}
|
||||
|
||||
const applyOperation = async (bytes) => {
|
||||
const applyOperation = async (entry) => {
|
||||
const task = async () => {
|
||||
const entry = await Entry.decode(bytes)
|
||||
if (entry) {
|
||||
const updated = await log.joinEntry(entry)
|
||||
if (updated) {
|
||||
if (onUpdate) {
|
||||
await onUpdate(log, entry)
|
||||
try {
|
||||
if (entry) {
|
||||
const updated = await log.joinEntry(entry)
|
||||
if (updated) {
|
||||
if (onUpdate) {
|
||||
await onUpdate(log, entry)
|
||||
}
|
||||
events.emit('update', entry)
|
||||
}
|
||||
events.emit('update', entry)
|
||||
}
|
||||
} catch (e) {
|
||||
console.error(e)
|
||||
}
|
||||
}
|
||||
await queue.add(task)
|
||||
@ -177,7 +183,7 @@ const Database = async ({ ipfs, identity, address, name, access, directory, meta
|
||||
* @async
|
||||
*/
|
||||
const drop = async () => {
|
||||
await queue.onIdle()
|
||||
await queue.clear()
|
||||
await log.clear()
|
||||
if (access && access.drop) {
|
||||
await access.drop()
|
||||
|
@ -3,11 +3,11 @@
|
||||
* @memberof module:Databases
|
||||
* @description Documents database.
|
||||
* @example <caption>Create documents db with custom index</caption>
|
||||
* import { create } from 'IPFS'
|
||||
* import { OrbitDB, Documents } from 'orbitdb'
|
||||
* import { createHelia } from 'helia'
|
||||
* import { createOrbitDB, Documents } from 'orbitdb'
|
||||
*
|
||||
* const ipfs = create()
|
||||
* const orbitdb = await OrbitDB({ ipfs })
|
||||
* const ipfs = createHelia()
|
||||
* const orbitdb = await createOrbitDB({ ipfs })
|
||||
* const db = await orbitdb.open('my-docs', { Database: Documents({ indexBy: 'myCustomId'} ) }
|
||||
*
|
||||
* @augments module:Databases~Database
|
||||
@ -25,8 +25,8 @@ const DefaultOptions = { indexBy: '_id' }
|
||||
* @return {module:Databases.Databases-Documents} A Documents function.
|
||||
* @memberof module:Databases
|
||||
*/
|
||||
const Documents = ({ indexBy } = DefaultOptions) => async ({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate }) => {
|
||||
const database = await Database({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically })
|
||||
const Documents = ({ indexBy } = DefaultOptions) => async ({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate, encrypt }) => {
|
||||
const database = await Database({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, encrypt })
|
||||
|
||||
const { addOperation, log } = database
|
||||
|
||||
|
@ -15,8 +15,8 @@ const type = 'events'
|
||||
* @return {module:Databases.Databases-Events} A Events function.
|
||||
* @memberof module:Databases
|
||||
*/
|
||||
const Events = () => async ({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate }) => {
|
||||
const database = await Database({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate })
|
||||
const Events = () => async ({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate, encryption }) => {
|
||||
const database = await Database({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate, encryption })
|
||||
|
||||
const { addOperation, log } = database
|
||||
|
||||
|
@ -6,12 +6,12 @@
|
||||
*
|
||||
* Key-value pairs are stored to the configured storage.
|
||||
* @example <caption>Specify a custom storage</caption>
|
||||
* import { create } from 'IPFS'
|
||||
* import { OrbitDB, KeyValueIndexed, IPFSBlockStorage } from 'orbitdb'
|
||||
* import { createHelia } from 'helia'
|
||||
* import { createOrbitDB, KeyValueIndexed, IPFSBlockStorage } from 'orbitdb'
|
||||
*
|
||||
* const ipfs = create()
|
||||
* const ipfs = createHelia()
|
||||
* const storage = await IPFSBlockStorage({ ipfs })
|
||||
* const orbitdb = await OrbitDB({ ipfs })
|
||||
* const orbitdb = await createOrbitDB({ ipfs })
|
||||
* const db = await orbitdb.open('my-kv', { Database: KeyValueIndexed({ storage }) })
|
||||
*
|
||||
* @augments module:Databases~Database
|
||||
@ -23,6 +23,84 @@ import pathJoin from '../utils/path-join.js'
|
||||
|
||||
const valueEncoding = 'json'
|
||||
|
||||
/**
|
||||
* Defines an index for a KeyValue database.
|
||||
* @param {string} [directory] A location for storing the index-related data
|
||||
* @return {Index} A Index function.
|
||||
*/
|
||||
const Index = ({ directory } = {}) => async () => {
|
||||
const index = await LevelStorage({ path: directory, valueEncoding })
|
||||
const indexedEntries = await LevelStorage({ path: pathJoin(directory, '/_indexedEntries/'), valueEncoding })
|
||||
|
||||
const update = async (log, entry) => {
|
||||
const keys = new Set()
|
||||
const toBeIndexed = new Set()
|
||||
const latest = entry.hash
|
||||
|
||||
// Function to check if a hash is in the entry index
|
||||
const isIndexed = async (hash) => (await indexedEntries.get(hash)) === true
|
||||
const isNotIndexed = async (hash) => !(await isIndexed(hash))
|
||||
|
||||
// Function to decide when the log traversal should be stopped
|
||||
const shoudStopTraverse = async (entry) => {
|
||||
// Go through the nexts of an entry and if any is not yet
|
||||
// indexed, add it to the list of entries-to-be-indexed
|
||||
for await (const hash of entry.next) {
|
||||
if (await isNotIndexed(hash)) {
|
||||
toBeIndexed.add(hash)
|
||||
}
|
||||
}
|
||||
// If the latest entry and all its nexts are indexed and to-be-indexed list is empty,
|
||||
// we don't have anything more to process, so return true to stop the traversal
|
||||
return await isIndexed(latest) && toBeIndexed.size === 0
|
||||
}
|
||||
|
||||
// Traverse the log and stop when everything has been processed
|
||||
for await (const entry of log.traverse(null, shoudStopTraverse)) {
|
||||
const { hash, payload } = entry
|
||||
// If an entry is not yet indexed, process it
|
||||
if (await isNotIndexed(hash)) {
|
||||
const { op, key } = payload
|
||||
if (op === 'PUT' && !keys.has(key)) {
|
||||
keys.add(key)
|
||||
await index.put(key, entry)
|
||||
await indexedEntries.put(hash, true)
|
||||
} else if (op === 'DEL' && !keys.has(key)) {
|
||||
keys.add(key)
|
||||
await index.del(key)
|
||||
await indexedEntries.put(hash, true)
|
||||
}
|
||||
// Remove the entry (hash) from the list of to-be-indexed entries
|
||||
toBeIndexed.delete(hash)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Closes the index and its storages.
|
||||
*/
|
||||
const close = async () => {
|
||||
await index.close()
|
||||
await indexedEntries.close()
|
||||
}
|
||||
|
||||
/**
|
||||
* Drops all records from the index and its storages.
|
||||
*/
|
||||
const drop = async () => {
|
||||
await index.clear()
|
||||
await indexedEntries.clear()
|
||||
}
|
||||
|
||||
return {
|
||||
get: index.get,
|
||||
iterator: index.iterator,
|
||||
update,
|
||||
close,
|
||||
drop
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Defines a KeyValueIndexed database.
|
||||
* @param {module:Storage} [storage=LevelStorage] A compatible storage where
|
||||
@ -31,36 +109,15 @@ const valueEncoding = 'json'
|
||||
* function.
|
||||
* @memberof module:Databases
|
||||
*/
|
||||
const KeyValueIndexed = ({ storage } = {}) => async ({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate }) => {
|
||||
const indexDirectory = pathJoin(directory || './orbitdb', `./${address}/_index/`)
|
||||
const index = storage || await LevelStorage({ path: indexDirectory, valueEncoding })
|
||||
const KeyValueIndexed = () => async ({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate, encryption }) => {
|
||||
// Set up the directory for an index
|
||||
directory = pathJoin(directory || './orbitdb', `./${address}/_index/`)
|
||||
|
||||
let latestOplogHash
|
||||
// Set up the index
|
||||
const index = await Index({ directory })()
|
||||
|
||||
const _updateIndex = async (log, entry) => {
|
||||
const keys = {}
|
||||
const it = await log.iterator({ gt: latestOplogHash })
|
||||
|
||||
for await (const entry of it) {
|
||||
const { op, key, value } = entry.payload
|
||||
|
||||
if (op === 'PUT' && !keys[key]) {
|
||||
keys[key] = true
|
||||
await index.put(key, value)
|
||||
} else if (op === 'DEL' && !keys[key]) {
|
||||
keys[key] = true
|
||||
await index.del(key)
|
||||
}
|
||||
}
|
||||
|
||||
latestOplogHash = entry ? entry.hash : null
|
||||
}
|
||||
|
||||
// Create the underlying KeyValue database
|
||||
const keyValueStore = await KeyValue()({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate: _updateIndex })
|
||||
|
||||
// Compute the index
|
||||
await _updateIndex(keyValueStore.log)
|
||||
// Set up the underlying KeyValue database
|
||||
const keyValueStore = await KeyValue()({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate: index.update, encryption })
|
||||
|
||||
/**
|
||||
* Gets a value from the store by key.
|
||||
@ -71,11 +128,10 @@ const KeyValueIndexed = ({ storage } = {}) => async ({ ipfs, identity, address,
|
||||
* @instance
|
||||
*/
|
||||
const get = async (key) => {
|
||||
const value = await index.get(key)
|
||||
if (value) {
|
||||
return value
|
||||
const entry = await index.get(key)
|
||||
if (entry) {
|
||||
return entry.payload.value
|
||||
}
|
||||
return keyValueStore.get(key)
|
||||
}
|
||||
|
||||
/**
|
||||
@ -88,8 +144,12 @@ const KeyValueIndexed = ({ storage } = {}) => async ({ ipfs, identity, address,
|
||||
* @instance
|
||||
*/
|
||||
const iterator = async function * ({ amount } = {}) {
|
||||
const it = keyValueStore.iterator({ amount })
|
||||
for await (const { key, value, hash } of it) {
|
||||
const it = index.iterator({ amount, reverse: true })
|
||||
for await (const record of it) {
|
||||
// 'index' is a LevelStorage that returns a [key, value] pair
|
||||
const entry = record[1]
|
||||
const { key, value } = entry.payload
|
||||
const hash = entry.hash
|
||||
yield { key, value, hash }
|
||||
}
|
||||
}
|
||||
@ -98,16 +158,16 @@ const KeyValueIndexed = ({ storage } = {}) => async ({ ipfs, identity, address,
|
||||
* Closes the index and underlying storage.
|
||||
*/
|
||||
const close = async () => {
|
||||
await index.close()
|
||||
await keyValueStore.close()
|
||||
await index.close()
|
||||
}
|
||||
|
||||
/**
|
||||
* Drops all records from the index and underlying storage.
|
||||
*/
|
||||
const drop = async () => {
|
||||
await index.clear()
|
||||
await keyValueStore.drop()
|
||||
await index.drop()
|
||||
}
|
||||
|
||||
return {
|
||||
|
@ -11,12 +11,12 @@ import Database from '../database.js'
|
||||
const type = 'keyvalue'
|
||||
|
||||
/**
|
||||
* Defines an KeyValue database.
|
||||
* Defines a KeyValue database.
|
||||
* @return {module:Databases.Databases-KeyValue} A KeyValue function.
|
||||
* @memberof module:Databases
|
||||
*/
|
||||
const KeyValue = () => async ({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate }) => {
|
||||
const database = await Database({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate })
|
||||
const KeyValue = () => async ({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate, encryption }) => {
|
||||
const database = await Database({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate, encryption })
|
||||
|
||||
const { addOperation, log } = database
|
||||
|
||||
|
@ -52,7 +52,7 @@ const PublicKeyIdentityProvider = ({ keystore }) => async () => {
|
||||
}
|
||||
|
||||
const key = await keystore.getKey(id) || await keystore.createKey(id)
|
||||
return uint8ArrayToString(key.public.marshal(), 'base16')
|
||||
return uint8ArrayToString(key.publicKey.raw, 'base16')
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -8,7 +8,7 @@
|
||||
* const storage = await MemoryStorage()
|
||||
* const keystore = await KeyStore({ storage })
|
||||
*/
|
||||
import * as crypto from '@libp2p/crypto'
|
||||
import { privateKeyFromRaw, publicKeyFromRaw, generateKeyPair } from '@libp2p/crypto/keys'
|
||||
import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string'
|
||||
import { toString as uint8ArrayToString } from 'uint8arrays/to-string'
|
||||
import { compare as uint8ArrayCompare } from 'uint8arrays/compare'
|
||||
@ -16,9 +16,6 @@ import ComposedStorage from './storage/composed.js'
|
||||
import LevelStorage from './storage/level.js'
|
||||
import LRUStorage from './storage/lru.js'
|
||||
|
||||
const unmarshal = crypto.keys.supportedKeys.secp256k1.unmarshalSecp256k1PrivateKey
|
||||
const unmarshalPubKey = crypto.keys.supportedKeys.secp256k1.unmarshalSecp256k1PublicKey
|
||||
|
||||
const verifySignature = async (signature, publicKey, data) => {
|
||||
if (!signature) {
|
||||
throw new Error('No signature given')
|
||||
@ -38,7 +35,7 @@ const verifySignature = async (signature, publicKey, data) => {
|
||||
|
||||
let res = false
|
||||
try {
|
||||
const pubKey = unmarshalPubKey(uint8ArrayFromString(publicKey, 'base16'))
|
||||
const pubKey = publicKeyFromRaw(uint8ArrayFromString(publicKey, 'base16'))
|
||||
res = await isValid(pubKey, data, uint8ArrayFromString(signature, 'base16'))
|
||||
} catch (e) {
|
||||
// Catch error: sig length wrong
|
||||
@ -123,8 +120,13 @@ const KeyStore = async ({ storage, path } = {}) => {
|
||||
* @namespace module:KeyStore~KeyStore
|
||||
* @description The instance returned by {@link module:KeyStore}.
|
||||
*/
|
||||
|
||||
// Persistent storage for keys
|
||||
storage = storage || await ComposedStorage(await LRUStorage({ size: 1000 }), await LevelStorage({ path: path || defaultPath }))
|
||||
|
||||
// Cache for deserialized/unmarshaled keys
|
||||
const keyCache = await LRUStorage({ size: 1000 })
|
||||
|
||||
/**
|
||||
* Closes the KeyStore's underlying storage.
|
||||
* @memberof module:KeyStore~KeyStore
|
||||
@ -133,6 +135,7 @@ const KeyStore = async ({ storage, path } = {}) => {
|
||||
*/
|
||||
const close = async () => {
|
||||
await storage.close()
|
||||
await keyCache.close()
|
||||
}
|
||||
|
||||
/**
|
||||
@ -143,6 +146,7 @@ const KeyStore = async ({ storage, path } = {}) => {
|
||||
*/
|
||||
const clear = async () => {
|
||||
await storage.clear()
|
||||
await keyCache.clear()
|
||||
}
|
||||
|
||||
/**
|
||||
@ -160,12 +164,17 @@ const KeyStore = async ({ storage, path } = {}) => {
|
||||
}
|
||||
|
||||
let hasKey = false
|
||||
try {
|
||||
const storedKey = await storage.get('private_' + id)
|
||||
hasKey = storedKey !== undefined && storedKey !== null
|
||||
} catch (e) {
|
||||
// Catches 'Error: ENOENT: no such file or directory, open <path>'
|
||||
console.error('Error: ENOENT: no such file or directory')
|
||||
let key = await keyCache.get(id)
|
||||
if (key) {
|
||||
hasKey = true
|
||||
} else {
|
||||
try {
|
||||
key = await storage.get('private_' + id)
|
||||
hasKey = key !== undefined && key !== null
|
||||
} catch (e) {
|
||||
// Catches 'Error: ENOENT: no such file or directory, open <path>'
|
||||
console.error('Error: ENOENT: no such file or directory')
|
||||
}
|
||||
}
|
||||
|
||||
return hasKey
|
||||
@ -180,7 +189,11 @@ const KeyStore = async ({ storage, path } = {}) => {
|
||||
* @instance
|
||||
*/
|
||||
const addKey = async (id, key) => {
|
||||
await storage.put('private_' + id, key.privateKey)
|
||||
const { privateKey } = key
|
||||
await storage.put('private_' + id, privateKey)
|
||||
// Unmarshal the key and add it to the cache
|
||||
const unmarshaledPrivateKey = privateKeyFromRaw(privateKey)
|
||||
await keyCache.put(id, unmarshaledPrivateKey)
|
||||
}
|
||||
|
||||
/**
|
||||
@ -197,18 +210,16 @@ const KeyStore = async ({ storage, path } = {}) => {
|
||||
}
|
||||
|
||||
// Generate a private key
|
||||
const pair = await crypto.keys.generateKeyPair('secp256k1')
|
||||
const keys = await crypto.keys.unmarshalPrivateKey(pair.bytes)
|
||||
const pubKey = keys.public.marshal()
|
||||
const keyPair = await generateKeyPair('secp256k1')
|
||||
|
||||
const key = {
|
||||
publicKey: pubKey,
|
||||
privateKey: keys.marshal()
|
||||
publicKey: keyPair.publicKey.raw,
|
||||
privateKey: keyPair.raw
|
||||
}
|
||||
|
||||
await addKey(id, key)
|
||||
|
||||
return keys
|
||||
return keyPair
|
||||
}
|
||||
|
||||
/**
|
||||
@ -225,18 +236,26 @@ const KeyStore = async ({ storage, path } = {}) => {
|
||||
throw new Error('id needed to get a key')
|
||||
}
|
||||
|
||||
let storedKey
|
||||
try {
|
||||
storedKey = await storage.get('private_' + id)
|
||||
} catch (e) {
|
||||
// ignore ENOENT error
|
||||
let key = await keyCache.get(id)
|
||||
|
||||
if (!key) {
|
||||
let storedKey
|
||||
try {
|
||||
storedKey = await storage.get('private_' + id)
|
||||
} catch (e) {
|
||||
// ignore ENOENT error
|
||||
}
|
||||
|
||||
if (!storedKey) {
|
||||
return
|
||||
}
|
||||
|
||||
key = privateKeyFromRaw(storedKey)
|
||||
|
||||
await keyCache.put(id, key)
|
||||
}
|
||||
|
||||
if (!storedKey) {
|
||||
return
|
||||
}
|
||||
|
||||
return unmarshal(storedKey)
|
||||
return key
|
||||
}
|
||||
|
||||
/**
|
||||
@ -259,7 +278,7 @@ const KeyStore = async ({ storage, path } = {}) => {
|
||||
throw new Error('Supported formats are `hex` and `buffer`')
|
||||
}
|
||||
|
||||
const pubKey = keys.public.marshal()
|
||||
const pubKey = keys.publicKey.raw
|
||||
|
||||
return format === 'buffer' ? pubKey : uint8ArrayToString(pubKey, 'base16')
|
||||
}
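A brief sketch of the updated key flow with @libp2p/crypto's key objects ('user-1' is a placeholder id; raw key bytes now live on .raw / .publicKey.raw instead of .marshal()):

const keystore = await KeyStore({ path: './keys' })
const keyPair = await keystore.createKey('user-1') // a libp2p PrivateKey object
const sameKey = await keystore.getKey('user-1')    // served from the in-memory key cache
console.log(uint8ArrayToString(sameKey.publicKey.raw, 'base16'))
await keystore.close()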
|
||||
|
@ -16,13 +16,17 @@ const ManifestStore = async ({ ipfs, storage } = {}) => {
|
||||
*/
|
||||
|
||||
storage = storage || await ComposedStorage(
|
||||
await LRUStorage({ size: 1000 }),
|
||||
await LRUStorage({ size: 100000 }),
|
||||
await IPFSBlockStorage({ ipfs, pin: true })
|
||||
)
|
||||
|
||||
const get = async (address) => {
|
||||
const bytes = await storage.get(address)
|
||||
const { value } = await Block.decode({ bytes, codec, hasher })
|
||||
if (value) {
|
||||
// Write to storage to make sure it gets pinned on IPFS
|
||||
await storage.put(address, bytes)
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
|
@ -55,7 +55,7 @@ const hashStringEncoding = base58btc
|
||||
* // { payload: "hello", next: [], ... }
|
||||
* @private
|
||||
*/
|
||||
const create = async (identity, id, payload, clock = null, next = [], refs = []) => {
|
||||
const create = async (identity, id, payload, encryptPayloadFn, clock = null, next = [], refs = []) => {
|
||||
if (identity == null) throw new Error('Identity is required, cannot create entry')
|
||||
if (id == null) throw new Error('Entry requires an id')
|
||||
if (payload == null) throw new Error('Entry requires a payload')
|
||||
@ -63,9 +63,16 @@ const create = async (identity, id, payload, clock = null, next = [], refs = [])
|
||||
|
||||
clock = clock || Clock(identity.publicKey)
|
||||
|
||||
let encryptedPayload
|
||||
|
||||
if (encryptPayloadFn) {
|
||||
const { bytes: encodedPayloadBytes } = await Block.encode({ value: payload, codec, hasher })
|
||||
encryptedPayload = await encryptPayloadFn(encodedPayloadBytes)
|
||||
}
|
||||
|
||||
const entry = {
|
||||
id, // For determining a unique chain
|
||||
payload, // Can be any dag-cbor encodeable data
|
||||
payload: encryptedPayload || payload, // Can be any dag-cbor encodeable data
|
||||
next, // Array of strings of CIDs
|
||||
refs, // Array of strings of CIDs
|
||||
clock, // Clock
|
||||
@ -78,8 +85,13 @@ const create = async (identity, id, payload, clock = null, next = [], refs = [])
|
||||
entry.key = identity.publicKey
|
||||
entry.identity = identity.hash
|
||||
entry.sig = signature
|
||||
entry.payload = payload
|
||||
|
||||
return encode(entry)
|
||||
if (encryptPayloadFn) {
|
||||
entry._payload = encryptedPayload
|
||||
}
|
||||
|
||||
return entry
|
||||
}
|
||||
|
||||
/**
|
||||
@ -97,13 +109,15 @@ const verify = async (identities, entry) => {
|
||||
if (!entry.key) throw new Error("Entry doesn't have a key")
|
||||
if (!entry.sig) throw new Error("Entry doesn't have a signature")
|
||||
|
||||
const e = Object.assign({}, entry)
|
||||
|
||||
const value = {
|
||||
id: entry.id,
|
||||
payload: entry.payload,
|
||||
next: entry.next,
|
||||
refs: entry.refs,
|
||||
clock: entry.clock,
|
||||
v: entry.v
|
||||
id: e.id,
|
||||
payload: e._payload || e.payload,
|
||||
next: e.next,
|
||||
refs: e.refs,
|
||||
clock: e.clock,
|
||||
v: e.v
|
||||
}
|
||||
|
||||
const { bytes } = await Block.encode({ value, codec, hasher })
|
||||
@ -136,7 +150,7 @@ const isEntry = (obj) => {
|
||||
* @private
|
||||
*/
|
||||
const isEqual = (a, b) => {
|
||||
return a && b && a.hash === b.hash
|
||||
return a && b && a.hash && a.hash === b.hash
|
||||
}
|
||||
|
||||
/**
|
||||
@ -146,9 +160,40 @@ const isEqual = (a, b) => {
|
||||
* @memberof module:Log~Entry
|
||||
* @private
|
||||
*/
|
||||
const decode = async (bytes) => {
|
||||
const { value } = await Block.decode({ bytes, codec, hasher })
|
||||
return encode(value)
|
||||
const decode = async (bytes, decryptEntryFn, decryptPayloadFn) => {
|
||||
let cid
|
||||
|
||||
if (decryptEntryFn) {
|
||||
try {
|
||||
const encryptedEntry = await Block.decode({ bytes, codec, hasher })
|
||||
bytes = await decryptEntryFn(encryptedEntry.value)
|
||||
cid = encryptedEntry.cid
|
||||
} catch (e) {
|
||||
throw new Error('Could not decrypt entry')
|
||||
}
|
||||
}
|
||||
|
||||
const decodedEntry = await Block.decode({ bytes, codec, hasher })
|
||||
const entry = decodedEntry.value
|
||||
|
||||
if (decryptPayloadFn) {
|
||||
try {
|
||||
const decryptedPayloadBytes = await decryptPayloadFn(entry.payload)
|
||||
const { value: decryptedPayload } = await Block.decode({ bytes: decryptedPayloadBytes, codec, hasher })
|
||||
entry._payload = entry.payload
|
||||
entry.payload = decryptedPayload
|
||||
} catch (e) {
|
||||
throw new Error('Could not decrypt payload')
|
||||
}
|
||||
}
|
||||
|
||||
cid = cid || decodedEntry.cid
|
||||
const hash = cid.toString(hashStringEncoding)
|
||||
|
||||
return {
|
||||
...entry,
|
||||
hash
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@ -158,13 +203,28 @@ const decode = async (bytes) => {
|
||||
* @memberof module:Log~Entry
|
||||
* @private
|
||||
*/
|
||||
const encode = async (entry) => {
|
||||
const { cid, bytes } = await Block.encode({ value: entry, codec, hasher })
|
||||
const encode = async (entry, encryptEntryFn, encryptPayloadFn) => {
|
||||
const e = Object.assign({}, entry)
|
||||
|
||||
if (encryptPayloadFn) {
|
||||
e.payload = e._payload
|
||||
}
|
||||
|
||||
delete e._payload
|
||||
delete e.hash
|
||||
|
||||
let { cid, bytes } = await Block.encode({ value: e, codec, hasher })
|
||||
|
||||
if (encryptEntryFn) {
|
||||
bytes = await encryptEntryFn(bytes)
|
||||
const encryptedEntry = await Block.encode({ value: bytes, codec, hasher })
|
||||
cid = encryptedEntry.cid
|
||||
bytes = encryptedEntry.bytes
|
||||
}
|
||||
|
||||
const hash = cid.toString(hashStringEncoding)
|
||||
const clock = Clock(entry.clock.id, entry.clock.time)
|
||||
|
||||
return {
|
||||
...entry,
|
||||
clock,
|
||||
hash,
|
||||
bytes
|
||||
}
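To make the two encryption layers concrete, here is a rough round trip through the functions above. The pass-through function stands in for a real cipher, and `identity` is a placeholder for a signing-capable OrbitDB identity.

const passthrough = async (bytes) => bytes // stand-in for a real encrypt/decrypt pair

// create() encrypts only the payload when a payload encryption function is given.
const entry = await create(identity, 'log-id', { op: 'PUT', key: 'a', value: 1 }, passthrough)

// encode() can additionally encrypt the whole entry for replication.
const { hash, bytes } = await encode(entry, passthrough, passthrough)

// decode() reverses both layers and restores entry.payload.
const decoded = await decode(bytes, passthrough, passthrough)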
|
||||
|
@ -9,19 +9,17 @@ import MemoryStorage from '../storage/memory.js'
|
||||
|
||||
const DefaultStorage = MemoryStorage
|
||||
|
||||
const Heads = async ({ storage, heads }) => {
|
||||
const Heads = async ({ storage, heads, decryptPayloadFn, decryptEntryFn }) => {
|
||||
storage = storage || await DefaultStorage()
|
||||
|
||||
const encoder = new TextEncoder()
|
||||
const decoder = new TextDecoder()
|
||||
|
||||
const put = async (heads) => {
|
||||
heads = findHeads(heads)
|
||||
for (const head of heads) {
|
||||
await storage.put(head.hash, head.bytes)
|
||||
}
|
||||
}
|
||||
|
||||
const set = async (heads) => {
|
||||
await storage.clear()
|
||||
await put(heads)
|
||||
const newHeads = heads.map(e => ({ hash: e.hash, next: e.next }))
|
||||
const bytes = encoder.encode(JSON.stringify(newHeads))
|
||||
await storage.put('heads', bytes)
|
||||
}
|
||||
|
||||
const add = async (head) => {
|
||||
@ -30,22 +28,21 @@ const Heads = async ({ storage, heads }) => {
|
||||
return
|
||||
}
|
||||
const newHeads = findHeads([...currentHeads, head])
|
||||
await set(newHeads)
|
||||
|
||||
await put(newHeads)
|
||||
return newHeads
|
||||
}
|
||||
|
||||
const remove = async (hash) => {
|
||||
const currentHeads = await all()
|
||||
const newHeads = currentHeads.filter(e => e.hash !== hash)
|
||||
await set(newHeads)
|
||||
await put(newHeads)
|
||||
}
|
||||
|
||||
const iterator = async function * () {
|
||||
const it = storage.iterator()
|
||||
for await (const [, bytes] of it) {
|
||||
const head = await Entry.decode(bytes)
|
||||
yield head
|
||||
const bytes = await storage.get('heads')
|
||||
const headHashes = bytes ? JSON.parse(decoder.decode(bytes)) : []
|
||||
for (const hash of headHashes) {
|
||||
yield hash
|
||||
}
|
||||
}
|
||||
|
||||
@ -66,11 +63,13 @@ const Heads = async ({ storage, heads }) => {
|
||||
}
|
||||
|
||||
// Initialize the heads if given as parameter
|
||||
await put(heads || [])
|
||||
if (heads) {
|
||||
await put(heads)
|
||||
}
|
||||
|
||||
return {
|
||||
put,
|
||||
set,
|
||||
set: put,
|
||||
add,
|
||||
remove,
|
||||
iterator,
|
||||
|
275
src/oplog/log.js
@ -7,20 +7,17 @@
|
||||
* ["Merkle-CRDTs: Merkle-DAGs meet CRDTs"]{@link https://arxiv.org/abs/2004.00107}
|
||||
*/
|
||||
import LRU from 'lru'
|
||||
import PQueue from 'p-queue'
|
||||
import Entry from './entry.js'
|
||||
import Clock, { tickClock } from './clock.js'
|
||||
import Heads from './heads.js'
|
||||
import ConflictResolution from './conflict-resolution.js'
|
||||
import MemoryStorage from '../storage/memory.js'
|
||||
import OplogStore from './oplog-store.js'
|
||||
|
||||
const { LastWriteWins, NoZeroes } = ConflictResolution
|
||||
|
||||
const randomId = () => new Date().getTime().toString()
|
||||
const maxClockTimeReducer = (res, acc) => Math.max(res, acc.clock.time)
|
||||
|
||||
// Default storage for storing the Log and its entries. Default: Memory. Options: Memory, LRU, IPFS.
|
||||
const DefaultStorage = MemoryStorage
|
||||
|
||||
// Default AccessController for the Log.
|
||||
// Default policy is that anyone can write to the Log.
|
||||
// Signature of an entry will always be verified regardless of AccessController policy.
|
||||
@ -55,7 +52,7 @@ const DefaultAccessController = async () => {
|
||||
* @memberof module:Log
|
||||
* @instance
|
||||
*/
|
||||
const Log = async (identity, { logId, logHeads, access, entryStorage, headsStorage, indexStorage, sortFn } = {}) => {
|
||||
const Log = async (identity, { logId, logHeads, access, entryStorage, headsStorage, indexStorage, sortFn, encryption } = {}) => {
|
||||
/**
|
||||
* @namespace Log
|
||||
* @description The instance returned by {@link module:Log}
|
||||
@ -67,21 +64,27 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
|
||||
if (logHeads != null && !Array.isArray(logHeads)) {
|
||||
throw new Error('\'logHeads\' argument must be an array')
|
||||
}
|
||||
|
||||
// Set Log's id
|
||||
const id = logId || randomId()
|
||||
|
||||
// Encryption of entries and payloads
|
||||
encryption = encryption || {}
|
||||
const encryptPayloadFn = encryption.data?.encrypt
|
||||
|
||||
// Access Controller
|
||||
access = access || await DefaultAccessController()
|
||||
// Oplog entry storage
|
||||
const _entries = entryStorage || await DefaultStorage()
|
||||
// Entry index for keeping track which entries are already in the log
|
||||
const _index = indexStorage || await DefaultStorage()
|
||||
// Heads storage
|
||||
headsStorage = headsStorage || await DefaultStorage()
|
||||
// Add heads to the state storage, ie. init the log state
|
||||
const _heads = await Heads({ storage: headsStorage, heads: logHeads })
|
||||
|
||||
// Index and storage of entries for this Log
|
||||
const oplogStore = await OplogStore({ logHeads, entryStorage, indexStorage, headsStorage, encryption })
|
||||
|
||||
// Conflict-resolution sorting function
|
||||
sortFn = NoZeroes(sortFn || LastWriteWins)
|
||||
|
||||
// Internal queues for processing appends and joins in their call-order
|
||||
const appendQueue = new PQueue({ concurrency: 1 })
|
||||
const joinQueue = new PQueue({ concurrency: 1 })
|
||||
|
||||
/**
|
||||
* Returns the clock of the log.
|
||||
* @return {module:Clock}
|
||||
@ -102,8 +105,8 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
|
||||
* @instance
|
||||
*/
|
||||
const heads = async () => {
|
||||
const res = await _heads.all()
|
||||
return res.sort(sortFn).reverse()
|
||||
const heads_ = await oplogStore.heads()
|
||||
return heads_.sort(sortFn).reverse()
|
||||
}
|
||||
|
||||
/**
|
||||
@ -130,16 +133,14 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
|
||||
* @instance
|
||||
*/
|
||||
const get = async (hash) => {
|
||||
const bytes = await _entries.get(hash)
|
||||
if (bytes) {
|
||||
const entry = await Entry.decode(bytes)
|
||||
return entry
|
||||
if (!hash) {
|
||||
throw new Error('hash is required')
|
||||
}
|
||||
return oplogStore.get(hash)
|
||||
}
|
||||
|
||||
const has = async (hash) => {
|
||||
const entry = await _index.get(hash)
|
||||
return entry != null
|
||||
return oplogStore.has(hash)
|
||||
}
|
||||
|
||||
/**
|
||||
@ -153,40 +154,45 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
|
||||
* @instance
|
||||
*/
|
||||
const append = async (data, options = { referencesCount: 0 }) => {
|
||||
// 1. Prepare entry
|
||||
// 2. Authorize entry
|
||||
// 3. Store entry
|
||||
// 4. return Entry
|
||||
// Get current heads of the log
|
||||
const heads_ = await heads()
|
||||
// Create the next pointers from heads
|
||||
const nexts = heads_.map(entry => entry.hash)
|
||||
// Get references (pointers) to multiple entries in the past
|
||||
// (skips the heads which are covered by the next field)
|
||||
const refs = await getReferences(heads_, options.referencesCount + heads_.length)
|
||||
// Create the entry
|
||||
const entry = await Entry.create(
|
||||
identity,
|
||||
id,
|
||||
data,
|
||||
tickClock(await clock()),
|
||||
nexts,
|
||||
refs
|
||||
)
|
||||
// Authorize the entry
|
||||
const canAppend = await access.canAppend(entry)
|
||||
if (!canAppend) {
|
||||
throw new Error(`Could not append entry:\nKey "${identity.hash}" is not allowed to write to the log`)
|
||||
const task = async () => {
|
||||
// 1. Prepare entry
|
||||
// 2. Authorize entry
|
||||
// 3. Store entry
|
||||
// 4. return Entry
|
||||
|
||||
// Get current heads of the log
|
||||
const heads_ = await heads()
|
||||
// Create the next pointers from heads
|
||||
const nexts = heads_.map(entry => entry.hash)
|
||||
// Get references (pointers) to multiple entries in the past
|
||||
// (skips the heads which are covered by the next field)
|
||||
const refs = await getReferences(heads_, options.referencesCount + heads_.length)
|
||||
|
||||
// Create the entry
|
||||
const entry = await Entry.create(
|
||||
identity,
|
||||
id,
|
||||
data,
|
||||
encryptPayloadFn,
|
||||
tickClock(await clock()),
|
||||
nexts,
|
||||
refs
|
||||
)
|
||||
|
||||
// Authorize the entry
|
||||
const canAppend = await access.canAppend(entry)
|
||||
if (!canAppend) {
|
||||
throw new Error(`Could not append entry:\nKey "${identity.hash}" is not allowed to write to the log`)
|
||||
}
|
||||
|
||||
// Add the entry to the oplog store (=store and index it)
|
||||
const hash = await oplogStore.setHead(entry)
|
||||
|
||||
// Return the appended entry
|
||||
return { ...entry, hash }
|
||||
}
|
||||
|
||||
// The appended entry is now the latest head
|
||||
await _heads.set([entry])
|
||||
// Add entry to the entry storage
|
||||
await _entries.put(entry.hash, entry.bytes)
|
||||
// Add entry to the entry index
|
||||
await _index.put(entry.hash, true)
|
||||
// Return the appended entry
|
||||
return entry
|
||||
return appendQueue.add(task)
|
||||
}
|
||||
|
||||
/**
|
||||
@ -210,9 +216,7 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
|
||||
if (!isLog(log)) {
|
||||
throw new Error('Given argument is not an instance of Log')
|
||||
}
|
||||
if (_entries.merge) {
|
||||
await _entries.merge(log.storage)
|
||||
}
|
||||
await oplogStore.storage.merge(log.storage)
|
||||
const heads = await log.heads()
|
||||
for (const entry of heads) {
|
||||
await joinEntry(entry)
|
||||
@ -232,81 +236,79 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
|
||||
* @instance
|
||||
*/
|
||||
const joinEntry = async (entry) => {
|
||||
/* 1. Check if the entry is already in the log and return early if it is */
|
||||
const isAlreadyInTheLog = await has(entry.hash)
|
||||
if (isAlreadyInTheLog) {
|
||||
return false
|
||||
}
|
||||
|
||||
const verifyEntry = async (entry) => {
|
||||
// Check that the Entry belongs to this Log
|
||||
if (entry.id !== id) {
|
||||
throw new Error(`Entry's id (${entry.id}) doesn't match the log's id (${id}).`)
|
||||
const task = async () => {
|
||||
/* 1. Check if the entry is already in the log and return early if it is */
|
||||
const isAlreadyInTheLog = await has(entry.hash)
|
||||
if (isAlreadyInTheLog) {
|
||||
return false
|
||||
}
|
||||
// Verify if entry is allowed to be added to the log
|
||||
const canAppend = await access.canAppend(entry)
|
||||
if (!canAppend) {
|
||||
throw new Error(`Could not append entry:\nKey "${entry.identity}" is not allowed to write to the log`)
|
||||
}
|
||||
// Verify signature for the entry
|
||||
const isValid = await Entry.verify(identity, entry)
|
||||
if (!isValid) {
|
||||
throw new Error(`Could not validate signature for entry "${entry.hash}"`)
|
||||
}
|
||||
}
|
||||
|
||||
/* 2. Verify the entry */
|
||||
await verifyEntry(entry)
|
||||
|
||||
/* 3. Find missing entries and connections (=path in the DAG) to the current heads */
|
||||
const headsHashes = (await heads()).map(e => e.hash)
|
||||
const hashesToAdd = new Set([entry.hash])
|
||||
const hashesToGet = new Set([...entry.next, ...entry.refs])
|
||||
const connectedHeads = new Set()
|
||||
|
||||
const traverseAndVerify = async () => {
|
||||
const getEntries = Array.from(hashesToGet.values()).filter(has).map(get)
|
||||
const entries = await Promise.all(getEntries)
|
||||
|
||||
for (const e of entries) {
|
||||
hashesToGet.delete(e.hash)
|
||||
|
||||
await verifyEntry(e)
|
||||
|
||||
hashesToAdd.add(e.hash)
|
||||
|
||||
for (const hash of [...e.next, ...e.refs]) {
|
||||
const isInTheLog = await has(hash)
|
||||
|
||||
if (!isInTheLog && !hashesToAdd.has(hash)) {
|
||||
hashesToGet.add(hash)
|
||||
} else if (headsHashes.includes(hash)) {
|
||||
connectedHeads.add(hash)
|
||||
}
|
||||
const verifyEntry = async (entry) => {
|
||||
// Check that the Entry belongs to this Log
|
||||
if (entry.id !== id) {
|
||||
throw new Error(`Entry's id (${entry.id}) doesn't match the log's id (${id}).`)
|
||||
}
|
||||
// Verify if entry is allowed to be added to the log
|
||||
const canAppend = await access.canAppend(entry)
|
||||
if (!canAppend) {
|
||||
throw new Error(`Could not append entry:\nKey "${entry.identity}" is not allowed to write to the log`)
|
||||
}
|
||||
// Verify signature for the entry
|
||||
const isValid = await Entry.verify(identity, entry)
|
||||
if (!isValid) {
|
||||
throw new Error(`Could not validate signature for entry "${entry.hash}"`)
|
||||
}
|
||||
}
|
||||
|
||||
if (hashesToGet.size > 0) {
|
||||
await traverseAndVerify()
|
||||
/* 2. Verify the entry */
|
||||
await verifyEntry(entry)
|
||||
|
||||
/* 3. Find missing entries and connections (=path in the DAG) to the current heads */
|
||||
const headsHashes = (await heads()).map(e => e.hash)
|
||||
const hashesToAdd = new Set([entry.hash])
|
||||
const hashesToGet = new Set([...entry.next, ...entry.refs])
|
||||
const connectedHeads = new Set()
|
||||
|
||||
const traverseAndVerify = async () => {
|
||||
const getEntries = Array.from(hashesToGet.values()).filter(has).map(get)
|
||||
const entries = await Promise.all(getEntries)
|
||||
|
||||
for (const e of entries) {
|
||||
hashesToGet.delete(e.hash)
|
||||
|
||||
await verifyEntry(e)
|
||||
|
||||
hashesToAdd.add(e.hash)
|
||||
|
||||
for (const hash of [...e.next, ...e.refs]) {
|
||||
const isInTheLog = await has(hash)
|
||||
|
||||
if (!isInTheLog && !hashesToAdd.has(hash)) {
|
||||
hashesToGet.add(hash)
|
||||
} else if (headsHashes.includes(hash)) {
|
||||
connectedHeads.add(hash)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (hashesToGet.size > 0) {
|
||||
await traverseAndVerify()
|
||||
}
|
||||
}
|
||||
|
||||
await traverseAndVerify()
|
||||
|
||||
/* 4. Add missing entries to the oplog store (=to the log) */
|
||||
await oplogStore.addVerified(hashesToAdd.values())
|
||||
/* 6. Remove heads which the new entries are connected to */
|
||||
await oplogStore.removeHeads(connectedHeads.values())
|
||||
/* 7. Add the new entry to heads (=union with current heads) */
|
||||
await oplogStore.addHead(entry)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
await traverseAndVerify()
|
||||
|
||||
/* 4. Add missing entries to the index (=to the log) */
|
||||
for (const hash of hashesToAdd.values()) {
|
||||
await _index.put(hash, true)
|
||||
}
|
||||
|
||||
/* 5. Remove heads which the new entries are connected to */
|
||||
for (const hash of connectedHeads.values()) {
|
||||
await _heads.remove(hash)
|
||||
}
|
||||
|
||||
/* 6. Add the new entry to heads (=union with current heads) */
|
||||
await _heads.add(entry)
|
||||
|
||||
return true
|
||||
return joinQueue.add(task)
|
||||
}
|
||||
|
||||
/**
|
||||
@ -314,7 +316,7 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
|
||||
* @memberof module:Log~Log
|
||||
* @instance
|
||||
*/
|
||||
const traverse = async function * (rootEntries, shouldStopFn, useRefs = true) {
|
||||
const traverse = async function * (rootEntries, shouldStopFn) {
|
||||
// By default, we don't stop traversal and traverse
|
||||
// until the end of the log
|
||||
const defaultStopFn = () => false
|
||||
@ -338,7 +340,7 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
|
||||
// Get the next entry from the stack
|
||||
entry = stack.pop()
|
||||
if (entry) {
|
||||
const { hash, next, refs } = entry
|
||||
const { hash, next } = entry
|
||||
// If we have an entry that we haven't traversed yet, process it
|
||||
if (!traversed[hash]) {
|
||||
// Yield the current entry
|
||||
@ -353,7 +355,7 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
|
||||
fetched[hash] = true
|
||||
// Add the next and refs hashes to the list of hashes to fetch next,
|
||||
// filter out traversed and fetched hashes
|
||||
toFetch = [...toFetch, ...next, ...(useRefs ? refs : [])].filter(notIndexed)
|
||||
toFetch = [...toFetch, ...next].filter(notIndexed)
|
||||
// Function to fetch an entry and making sure it's not a duplicate (check the hash indices)
|
||||
const fetchEntries = (hash) => {
|
||||
if (!traversed[hash] && !fetched[hash]) {
|
||||
@ -367,7 +369,7 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
|
||||
// Add the next and refs fields from the fetched entries to the next round
|
||||
toFetch = nexts
|
||||
.filter(e => e !== null && e !== undefined)
|
||||
.reduce((res, acc) => Array.from(new Set([...res, ...acc.next, ...(useRefs ? acc.refs : [])])), [])
|
||||
.reduce((res, acc) => Array.from(new Set([...res, ...acc.next])), [])
|
||||
.filter(notIndexed)
|
||||
// Add the fetched entries to the stack to be processed
|
||||
stack = [...nexts, ...stack]
|
||||
@ -486,9 +488,9 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
|
||||
* @instance
|
||||
*/
|
||||
const clear = async () => {
|
||||
await _index.clear()
|
||||
await _heads.clear()
|
||||
await _entries.clear()
|
||||
await appendQueue.clear()
|
||||
await joinQueue.clear()
|
||||
await oplogStore.clear()
|
||||
}
|
||||
|
||||
/**
|
||||
@ -497,9 +499,9 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
|
||||
* @instance
|
||||
*/
|
||||
const close = async () => {
|
||||
await _index.close()
|
||||
await _heads.close()
|
||||
await _entries.close()
|
||||
await appendQueue.onIdle()
|
||||
await joinQueue.onIdle()
|
||||
await oplogStore.close()
|
||||
}
|
||||
|
||||
/**
|
||||
@ -531,7 +533,7 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
|
||||
const shouldStopTraversal = async (entry) => {
|
||||
return refs.length >= amount && amount !== -1
|
||||
}
|
||||
for await (const { hash } of traverse(heads, shouldStopTraversal, false)) {
|
||||
for await (const { hash } of traverse(heads, shouldStopTraversal)) {
|
||||
refs.push(hash)
|
||||
}
|
||||
refs = refs.slice(heads.length + 1, amount)
|
||||
@ -555,7 +557,8 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
|
||||
close,
|
||||
access,
|
||||
identity,
|
||||
storage: _entries
|
||||
storage: oplogStore.storage,
|
||||
encryption
|
||||
}
|
||||
}
|
||||
|
||||
|
112
src/oplog/oplog-store.js
Normal file
@ -0,0 +1,112 @@
|
||||
import Entry from './entry.js'
|
||||
import Heads from './heads.js'
|
||||
import MemoryStorage from '../storage/memory.js'
|
||||
|
||||
// Default storage for storing the Log and its entries. Default: Memory. Options: Memory, LRU, IPFS.
|
||||
const DefaultStorage = MemoryStorage
|
||||
|
||||
const OplogStore = async ({ logHeads, entryStorage, headsStorage, indexStorage, encryption }) => {
|
||||
// Set up the encryption and decryption functions
|
||||
const encryptEntryFn = encryption?.replication?.encrypt
|
||||
const decryptEntryFn = encryption?.replication?.decrypt
|
||||
const encryptPayloadFn = encryption?.data?.encrypt
|
||||
const decryptPayloadFn = encryption?.data?.decrypt
|
||||
// Oplog entry storage
|
||||
const _entries = entryStorage || await DefaultStorage()
|
||||
// Entry index for keeping track which entries are already in the log
|
||||
const _index = indexStorage || await DefaultStorage()
|
||||
// Heads storage
|
||||
headsStorage = headsStorage || await DefaultStorage()
|
||||
// Add heads to the state storage, ie. init the log state
|
||||
const _heads = await Heads({ storage: headsStorage, heads: logHeads, decryptPayloadFn, decryptEntryFn })
|
||||
|
||||
const get = async (hash) => {
|
||||
const bytes = await _entries.get(hash)
|
||||
if (bytes) {
|
||||
const entry = await Entry.decode(bytes, decryptEntryFn, decryptPayloadFn)
|
||||
return entry
|
||||
}
|
||||
}
|
||||
|
||||
const getBytes = async (hash) => {
|
||||
return _entries.get(hash)
|
||||
}
|
||||
|
||||
const has = async (hash) => {
|
||||
const entry = await _index.get(hash)
|
||||
return entry != null
|
||||
}
|
||||
|
||||
const heads = async () => {
|
||||
const heads_ = []
|
||||
for (const { hash } of await _heads.all()) {
|
||||
const head = await get(hash)
|
||||
heads_.push(head)
|
||||
}
|
||||
return heads_
|
||||
}
|
||||
|
||||
const setHead = async (entry) => {
|
||||
const { hash, bytes } = await Entry.encode(entry, encryptEntryFn, encryptPayloadFn)
|
||||
// Add entry to the entry storage
|
||||
await _entries.put(hash, bytes)
|
||||
// Add entry to the entry index
|
||||
await _index.put(hash, true)
|
||||
// The appended entry is now the latest head
|
||||
await _heads.set([{ hash, next: entry.next }])
|
||||
|
||||
return hash
|
||||
}
|
||||
|
||||
const addHead = async (entry) => {
|
||||
/* 7. Add the new entry to heads (=union with current heads) */
|
||||
await _heads.add(entry)
|
||||
return entry.hash
|
||||
}
|
||||
|
||||
const removeHeads = async (hashes) => {
|
||||
/* 5. Remove heads which the new entries are connected to */
|
||||
for (const hash of hashes) {
|
||||
await _heads.remove(hash)
|
||||
}
|
||||
}
|
||||
|
||||
const addVerified = async (hashes) => {
|
||||
/* 4. Add missing entries to the index (=to the log) */
|
||||
for (const hash of hashes) {
|
||||
await _index.put(hash, true)
|
||||
/* 5. Add new entry to entries (for pinning) */
|
||||
if (_entries.persist) {
|
||||
await _entries.persist(hash)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const clear = async () => {
|
||||
await _index.clear()
|
||||
await _heads.clear()
|
||||
await _entries.clear()
|
||||
}
|
||||
|
||||
const close = async () => {
|
||||
await _index.close()
|
||||
await _heads.close()
|
||||
await _entries.close()
|
||||
}
|
||||
|
||||
return {
|
||||
get,
|
||||
getBytes,
|
||||
has,
|
||||
heads,
|
||||
setHead,
|
||||
addHead,
|
||||
removeHeads,
|
||||
addVerified,
|
||||
storage: _entries,
|
||||
clear,
|
||||
close
|
||||
}
|
||||
}
|
||||
|
||||
export default OplogStore
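A rough sketch of wiring an OplogStore with the two encryption hooks. The pass-through functions and `entry` are placeholders, and the storages default to MemoryStorage when omitted.

const passthrough = async (bytes) => bytes
const oplogStore = await OplogStore({
  logHeads: [],
  encryption: {
    replication: { encrypt: passthrough, decrypt: passthrough }, // applied to whole entries
    data: { encrypt: passthrough, decrypt: passthrough }         // applied to payloads only
  }
})
// setHead() encodes the entry, stores its bytes, marks it indexed and records it as the head.
const hash = await oplogStore.setHead(entry)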
|
@ -104,6 +104,7 @@ const OrbitDB = async ({ ipfs, id, identity, identities, directory } = {}) => {
|
||||
* @param {module:Storage} [params.indexStorage=[ComposedStorage]{@link module:Storage.Storage-Composed}] A compatible storage instance for storing an index of log entries. Defaults to ComposedStorage(LRUStorage, LevelStorage).
|
||||
* @param {number} [params.referencesCount] The number of references to
|
||||
* use for [Log]{@link module:Log} entries.
|
||||
* @param {Object} [params.encryption] Encryption module to encrypt/decrypt database payloads and entries. If provided, the encryption object must take the form { replication: { encrypt, decrypt }, data: { encrypt, decrypt } }.
|
||||
* @memberof module:OrbitDB
|
||||
* @return {module:Database} A database instance.
|
||||
* @throws "Unsupported database type" if the type specified is not in the list
|
||||
@ -112,7 +113,7 @@ const OrbitDB = async ({ ipfs, id, identity, identities, directory } = {}) => {
|
||||
* @instance
|
||||
* @async
|
||||
*/
|
||||
const open = async (address, { type, meta, sync, Database, AccessController, headsStorage, entryStorage, indexStorage, referencesCount } = {}) => {
|
||||
const open = async (address, { type, meta, sync, Database, AccessController, headsStorage, entryStorage, indexStorage, referencesCount, encryption } = {}) => {
|
||||
let name, manifest, accessController
|
||||
|
||||
if (databases[address]) {
|
||||
@ -153,7 +154,7 @@ const OrbitDB = async ({ ipfs, id, identity, identities, directory } = {}) => {
|
||||
|
||||
address = address.toString()
|
||||
|
||||
const db = await Database({ ipfs, identity, address, name, access: accessController, directory, meta, syncAutomatically: sync, headsStorage, entryStorage, indexStorage, referencesCount })
|
||||
const db = await Database({ ipfs, identity, address, name, access: accessController, directory, meta, syncAutomatically: sync, headsStorage, entryStorage, indexStorage, referencesCount, encryption })
|
||||
|
||||
db.events.on('close', onDatabaseClosed(address))
|
||||
|
||||
@ -193,6 +194,7 @@ const OrbitDB = async ({ ipfs, id, identity, identities, directory } = {}) => {
|
||||
ipfs,
|
||||
directory,
|
||||
keystore,
|
||||
identities,
|
||||
identity,
|
||||
peerId
|
||||
}
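A minimal sketch of the encryption option's shape when opening a database, matching the { replication, data } form documented above. The pass-through functions are placeholders for a real cipher, and `orbitdb` is assumed to be an instance created elsewhere.

const passthrough = async (bytes) => bytes
const encryption = {
  replication: { encrypt: passthrough, decrypt: passthrough }, // encrypts replicated entries
  data: { encrypt: passthrough, decrypt: passthrough }         // encrypts payloads only
}
const db = await orbitdb.open('my-db', { encryption })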
|
||||
|
@ -57,6 +57,18 @@ const ComposedStorage = async (storage1, storage2) => {
|
||||
return value
|
||||
}
|
||||
|
||||
/**
|
||||
* Deletes a value from storage.
|
||||
* @function
|
||||
* @param {string} hash The hash of the value to delete.
|
||||
* @memberof module:Storage.Storage-Composed
|
||||
* @instance
|
||||
*/
|
||||
const del = async (hash) => {
|
||||
await storage1.del(hash)
|
||||
await storage2.del(hash)
|
||||
}
|
||||
|
||||
/**
|
||||
* Iterates over records stored in both storages.
|
||||
* @function
|
||||
@ -64,10 +76,11 @@ const ComposedStorage = async (storage1, storage2) => {
|
||||
* @memberof module:Storage.Storage-Composed
|
||||
* @instance
|
||||
*/
|
||||
const iterator = async function * () {
|
||||
const iterator = async function * ({ amount, reverse } = {}) {
|
||||
const keys = []
|
||||
const iteratorOptions = { amount: amount || -1, reverse: reverse || false }
|
||||
for (const storage of [storage1, storage2]) {
|
||||
for await (const [key, value] of storage.iterator()) {
|
||||
for await (const [key, value] of storage.iterator(iteratorOptions)) {
|
||||
if (!keys[key]) {
|
||||
keys[key] = true
|
||||
yield [key, value]
|
||||
@ -101,6 +114,16 @@ const ComposedStorage = async (storage1, storage2) => {
|
||||
await storage2.clear()
|
||||
}
|
||||
|
||||
const persist = async (hash) => {
|
||||
if (storage1.persist) {
|
||||
await storage1.persist(hash)
|
||||
}
|
||||
|
||||
if (storage2.persist) {
|
||||
await storage2.persist(hash)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Calls close on each of the composed storages.
|
||||
* @function
|
||||
@ -115,6 +138,8 @@ const ComposedStorage = async (storage1, storage2) => {
|
||||
return {
|
||||
put,
|
||||
get,
|
||||
del,
|
||||
persist,
|
||||
iterator,
|
||||
merge,
|
||||
clear,
|
||||
|
@ -7,6 +7,7 @@
|
||||
import { CID } from 'multiformats/cid'
|
||||
import { base58btc } from 'multiformats/bases/base58'
|
||||
import { TimeoutController } from 'timeout-abort-controller'
|
||||
import drain from 'it-drain'
|
||||
|
||||
const DefaultTimeout = 30000 // 30 seconds
|
||||
|
||||
@ -27,6 +28,8 @@ const DefaultTimeout = 30000 // 30 seconds
|
||||
const IPFSBlockStorage = async ({ ipfs, pin, timeout } = {}) => {
|
||||
if (!ipfs) throw new Error('An instance of ipfs is required.')
|
||||
|
||||
const timeoutControllers = new Set()
|
||||
|
||||
/**
|
||||
* Puts data to an IPFS block.
|
||||
* @function
|
||||
@ -40,9 +43,7 @@ const IPFSBlockStorage = async ({ ipfs, pin, timeout } = {}) => {
|
||||
const { signal } = new TimeoutController(timeout || DefaultTimeout)
|
||||
await ipfs.blockstore.put(cid, data, { signal })
|
||||
|
||||
if (pin && !(await ipfs.pins.isPinned(cid))) {
|
||||
await ipfs.pins.add(cid)
|
||||
}
|
||||
await persist(hash)
|
||||
}
|
||||
|
||||
const del = async (hash) => {}
|
||||
@ -57,25 +58,40 @@ const IPFSBlockStorage = async ({ ipfs, pin, timeout } = {}) => {
|
||||
*/
|
||||
const get = async (hash) => {
|
||||
const cid = CID.parse(hash, base58btc)
|
||||
const { signal } = new TimeoutController(timeout || DefaultTimeout)
|
||||
const block = await ipfs.blockstore.get(cid, { signal })
|
||||
const controller = new TimeoutController(timeout || DefaultTimeout)
|
||||
timeoutControllers.add(controller)
|
||||
const block = await ipfs.blockstore.get(cid, { signal: controller.signal })
|
||||
timeoutControllers.delete(controller)
|
||||
if (block) {
|
||||
return block
|
||||
}
|
||||
}
|
||||
|
||||
const persist = async (hash) => {
|
||||
const cid = CID.parse(hash, base58btc)
|
||||
if (pin && !(await ipfs.pins.isPinned(cid))) {
|
||||
await drain(ipfs.pins.add(cid))
|
||||
}
|
||||
}
|
||||
|
||||
const iterator = async function * () {}
|
||||
|
||||
const merge = async (other) => {}
|
||||
|
||||
const clear = async () => {}
|
||||
|
||||
const close = async () => {}
|
||||
const close = async () => {
|
||||
for (const controller of timeoutControllers) {
|
||||
controller.abort()
|
||||
}
|
||||
timeoutControllers.clear()
|
||||
}
|
||||
|
||||
return {
|
||||
put,
|
||||
del,
|
||||
get,
|
||||
persist,
|
||||
iterator,
|
||||
merge,
|
||||
clear,
|
||||
|
@ -78,8 +78,9 @@ const LevelStorage = async ({ path, valueEncoding } = {}) => {
|
||||
* @memberof module:Storage.Storage-Level
|
||||
* @instance
|
||||
*/
|
||||
const iterator = async function * () {
|
||||
for await (const [key, value] of db.iterator()) {
|
||||
const iterator = async function * ({ amount, reverse } = {}) {
|
||||
const iteratorOptions = { limit: amount || -1, reverse: reverse || false }
|
||||
for await (const [key, value] of db.iterator(iteratorOptions)) {
|
||||
yield [key, value]
|
||||
}
|
||||
}
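A small usage sketch of the new iterator options (a hypothetical LevelStorage instance; reverse iterates in reverse key order):

const storage = await LevelStorage({ path: './level-example', valueEncoding: 'json' })
for await (const [key, value] of storage.iterator({ amount: 10, reverse: true })) {
  console.log(key, value)
}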
|
||||
|
37
src/sync.js
@ -3,6 +3,7 @@ import PQueue from 'p-queue'
|
||||
import { EventEmitter } from 'events'
|
||||
import { TimeoutController } from 'timeout-abort-controller'
|
||||
import pathJoin from './utils/path-join.js'
|
||||
import { Entry } from './oplog/index.js'
|
||||
|
||||
const DefaultTimeout = 30000 // 30 seconds
|
||||
|
||||
@ -19,7 +20,9 @@ const DefaultTimeout = 30000 // 30 seconds
|
||||
* Upon subscribing to the topic, peers already connected to the topic receive
|
||||
* the subscription message and "dial" the subscribing peer using a libp2p
|
||||
* custom protocol. Once connected to the subscribing peer on a direct
|
||||
* peer-to-peer connection, the dialing peer and the subscribing peer exchange * the heads of the Log each peer currently has. Once completed, the peers have * the same "local state".
|
||||
* peer-to-peer connection, the dialing peer and the subscribing peer exchange
|
||||
* the heads of the Log each peer currently has. Once completed, the peers have
|
||||
* the same "local state".
|
||||
*
|
||||
* Once the initial sync has completed, peers notify one another of updates to
|
||||
* the log, ie. updates to the database, using the initially opened pubsub
|
||||
@ -132,7 +135,7 @@ const Sync = async ({ ipfs, log, events, onSynced, start, timeout }) => {
|
||||
*/
|
||||
events = events || new EventEmitter()
|
||||
|
||||
timeout = timeout || DefaultTimeout
|
||||
timeout ??= DefaultTimeout
|
||||
|
||||
let started = false
|
||||
|
||||
@ -144,7 +147,8 @@ const Sync = async ({ ipfs, log, events, onSynced, start, timeout }) => {
|
||||
const sendHeads = (source) => {
|
||||
return (async function * () {
|
||||
const heads = await log.heads()
|
||||
for await (const { bytes } of heads) {
|
||||
for await (const { hash } of heads) {
|
||||
const bytes = await log.storage.get(hash)
|
||||
yield bytes
|
||||
}
|
||||
})()
|
||||
@ -154,7 +158,8 @@ const Sync = async ({ ipfs, log, events, onSynced, start, timeout }) => {
|
||||
for await (const value of source) {
|
||||
const headBytes = value.subarray()
|
||||
if (headBytes && onSynced) {
|
||||
await onSynced(headBytes)
|
||||
const entry = await Entry.decode(headBytes, log.encryption.replication?.decrypt, log.encryption.data?.decrypt)
|
||||
await onSynced(entry)
|
||||
}
|
||||
}
|
||||
if (started) {
|
||||
@ -192,9 +197,8 @@ const Sync = async ({ ipfs, log, events, onSynced, start, timeout }) => {
|
||||
const stream = await libp2p.dialProtocol(remotePeer, headsSyncAddress, { signal })
|
||||
await pipe(sendHeads, stream, receiveHeads(peerId))
|
||||
} catch (e) {
|
||||
console.error(e)
|
||||
peers.delete(peerId)
|
||||
if (e.code === 'ERR_UNSUPPORTED_PROTOCOL') {
|
||||
if (e.name === 'UnsupportedProtocolError') {
|
||||
// Skip peer, they don't have this database currently
|
||||
} else {
|
||||
events.emit('error', e)
|
||||
@ -218,7 +222,8 @@ const Sync = async ({ ipfs, log, events, onSynced, start, timeout }) => {
|
||||
const task = async () => {
|
||||
try {
|
||||
if (data && onSynced) {
|
||||
await onSynced(data)
|
||||
const entry = await Entry.decode(data, log.encryption.replication?.decrypt, log.encryption.data?.decrypt)
|
||||
await onSynced(entry)
|
||||
}
|
||||
} catch (e) {
|
||||
events.emit('error', e)
|
||||
@ -230,6 +235,10 @@ const Sync = async ({ ipfs, log, events, onSynced, start, timeout }) => {
|
||||
}
|
||||
}
|
||||
|
||||
const handlePeerDisconnected = async event => {
|
||||
peers.delete(event.detail.toString())
|
||||
}
|
||||
|
||||
/**
|
||||
* Add a log entry to the Sync Protocol to be sent to peers.
|
||||
* @function add
|
||||
@ -238,8 +247,9 @@ const Sync = async ({ ipfs, log, events, onSynced, start, timeout }) => {
|
||||
* @instance
|
||||
*/
|
||||
const add = async (entry) => {
|
||||
if (started) {
|
||||
await pubsub.publish(address, entry.bytes)
|
||||
if (started && entry && entry.hash) {
|
||||
const bytes = await log.storage.get(entry.hash)
|
||||
await pubsub.publish(address, bytes)
|
||||
}
|
||||
}
|
||||
|
||||
@ -252,11 +262,12 @@ const Sync = async ({ ipfs, log, events, onSynced, start, timeout }) => {
|
||||
const stopSync = async () => {
|
||||
if (started) {
|
||||
started = false
|
||||
await queue.onIdle()
|
||||
await queue.clear()
|
||||
pubsub.removeEventListener('subscription-change', handlePeerSubscribed)
|
||||
pubsub.removeEventListener('message', handleUpdateMessage)
|
||||
await libp2p.unhandle(headsSyncAddress)
|
||||
await pubsub.unsubscribe(address)
|
||||
libp2p.removeEventListener('peer:disconnect', handlePeerDisconnected)
|
||||
peers.clear()
|
||||
}
|
||||
}
|
||||
@ -269,12 +280,14 @@ const Sync = async ({ ipfs, log, events, onSynced, start, timeout }) => {
|
||||
*/
|
||||
const startSync = async () => {
|
||||
if (!started) {
|
||||
// Exchange head entries with peers when connected
|
||||
await libp2p.handle(headsSyncAddress, handleReceiveHeads)
|
||||
pubsub.addEventListener('subscription-change', handlePeerSubscribed)
|
||||
pubsub.addEventListener('message', handleUpdateMessage)
|
||||
// Subscribe to the pubsub channel for this database through which updates are sent
|
||||
await pubsub.subscribe(address)
|
||||
// Exchange head entries with peers when connected
|
||||
await libp2p.handle(headsSyncAddress, handleReceiveHeads)
|
||||
// Remove disconnected peers from `peers`, as otherwise they will not resync heads on reconnection
|
||||
libp2p.addEventListener('peer:disconnect', handlePeerDisconnected)
|
||||
started = true
|
||||
}
|
||||
}
|
||||
|
@ -1,10 +1,7 @@
|
||||
import * as crypto from '@libp2p/crypto'
|
||||
import { privateKeyFromRaw } from '@libp2p/crypto/keys'
|
||||
import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string'
|
||||
import { Identities, KeyStore } from '../../src/index.js'
|
||||
|
||||
const unmarshal = crypto.keys.supportedKeys.secp256k1.unmarshalSecp256k1PrivateKey
|
||||
const unmarshalPubKey = crypto.keys.supportedKeys.secp256k1.unmarshalSecp256k1PublicKey
|
||||
|
||||
const keysPath = './testkeys'
|
||||
|
||||
const isBrowser = () => typeof window !== 'undefined'
|
||||
@ -52,10 +49,10 @@ before(async () => {
|
||||
]
|
||||
|
||||
for (let user of users) {
|
||||
const privateKey1 = unmarshal(uint8ArrayFromString(user.privateKey, 'base16'))
|
||||
const privateKey2 = unmarshal(uint8ArrayFromString(user.identity.privateKey, 'base16'))
|
||||
await keystore.addKey(user.id, { privateKey: privateKey1.marshal() })
|
||||
await keystore.addKey(user.identity.id, { privateKey: privateKey2.marshal() })
|
||||
const privateKey1 = privateKeyFromRaw(uint8ArrayFromString(user.privateKey, 'base16'))
|
||||
const privateKey2 = privateKeyFromRaw(uint8ArrayFromString(user.identity.privateKey, 'base16'))
|
||||
await keystore.addKey(user.id, { privateKey: privateKey1.raw })
|
||||
await keystore.addKey(user.identity.id, { privateKey: privateKey2.raw })
|
||||
}
|
||||
|
||||
await keystore.close()
|
||||
|
@ -196,27 +196,20 @@ describe('Database - Replication', function () {
|
||||
db1 = await Database({ ipfs: ipfs1, identity: testIdentity1, address: databaseId, accessController, directory: './orbitdb1', entryStorage: storage1 })
|
||||
db2 = await Database({ ipfs: ipfs2, identity: testIdentity2, address: databaseId, accessController, directory: './orbitdb2', entryStorage: storage2 })
|
||||
|
||||
let connected1 = false
|
||||
let connected2 = false
|
||||
let connected = false
|
||||
|
||||
const onConnected1 = (peerId, heads) => {
|
||||
connected1 = true
|
||||
const onConnected = (peerId, heads) => {
|
||||
connected = true
|
||||
}
|
||||
|
||||
const onConnected2 = (peerId, heads) => {
|
||||
connected2 = true
|
||||
}
|
||||
|
||||
db1.events.on('join', onConnected1)
|
||||
db2.events.on('join', onConnected2)
|
||||
db2.events.on('join', onConnected)
|
||||
|
||||
await db1.addOperation({ op: 'PUT', key: 1, value: 'record 1 on db 1' })
|
||||
await db1.addOperation({ op: 'PUT', key: 2, value: 'record 2 on db 1' })
|
||||
await db1.addOperation({ op: 'PUT', key: 3, value: 'record 3 on db 1' })
|
||||
await db1.addOperation({ op: 'PUT', key: 4, value: 'record 4 on db 1' })
|
||||
|
||||
await waitFor(() => connected1, () => true)
|
||||
await waitFor(() => connected2, () => true)
|
||||
await waitFor(() => connected, () => true)
|
||||
|
||||
const all1 = []
|
||||
for await (const item of db1.log.iterator()) {
|
||||
|
@ -3,7 +3,7 @@ import { rimraf } from 'rimraf'
|
||||
import { existsSync } from 'fs'
|
||||
import { copy } from 'fs-extra'
|
||||
import Path from 'path'
|
||||
import { Database, Entry, KeyStore, Identities } from '../src/index.js'
|
||||
import { Database, KeyStore, Identities } from '../src/index.js'
|
||||
import LevelStorage from '../src/storage/level.js'
|
||||
import MemoryStorage from '../src/storage/memory.js'
|
||||
import testKeysPath from './fixtures/test-keys-path.js'
|
||||
@ -68,8 +68,13 @@ describe('Database', function () {
|
||||
describe('Options', () => {
|
||||
it('uses default directory for headsStorage', async () => {
|
||||
db = await Database({ ipfs, identity: testIdentity, address: databaseId, accessController })
|
||||
const op = { op: 'PUT', key: 1, value: 'record 1 on db 1' }
|
||||
const hash = await db.addOperation(op)
|
||||
|
||||
const op1 = { op: 'PUT', key: 1, value: 'record 1 on db 1 version 1' }
|
||||
const op2 = { op: 'PUT', key: 1, value: 'record 1 on db 1 version 2' }
|
||||
|
||||
await db.addOperation(op1)
|
||||
const hash = await db.addOperation(op2)
|
||||
const entry = await db.log.get(hash)
|
||||
|
||||
const headsPath = Path.join('./orbitdb/', `${databaseId}/`, '/log/_heads/')
|
||||
|
||||
@ -78,8 +83,13 @@ describe('Database', function () {
|
||||
await db.close()
|
||||
|
||||
const headsStorage = await LevelStorage({ path: headsPath })
|
||||
const bytes = await headsStorage.get('heads')
|
||||
const heads = JSON.parse(new TextDecoder().decode(bytes))
|
||||
|
||||
deepStrictEqual((await Entry.decode(await headsStorage.get(hash))).payload, op)
|
||||
strictEqual(heads.length, 1)
|
||||
strictEqual(heads.at(0).hash, hash)
|
||||
strictEqual(heads.at(0).next.length, 1)
|
||||
strictEqual(heads.at(0).next.at(0), entry.next.at(0))
|
||||
|
||||
await headsStorage.close()
|
||||
|
||||
@ -88,8 +98,12 @@ describe('Database', function () {
|
||||
|
||||
it('uses given directory for headsStorage', async () => {
|
||||
db = await Database({ ipfs, identity: testIdentity, address: databaseId, accessController, directory: './custom-directory' })
|
||||
const op = { op: 'PUT', key: 1, value: 'record 1 on db 1' }
|
||||
const hash = await db.addOperation(op)
|
||||
const op1 = { op: 'PUT', key: 1, value: 'record 1 on db 1 version 1' }
|
||||
const op2 = { op: 'PUT', key: 1, value: 'record 1 on db 1 version 2' }
|
||||
|
||||
await db.addOperation(op1)
|
||||
const hash = await db.addOperation(op2)
|
||||
const entry = await db.log.get(hash)
|
||||
|
||||
const headsPath = Path.join('./custom-directory/', `${databaseId}/`, '/log/_heads/')
|
||||
|
||||
@ -99,7 +113,13 @@ describe('Database', function () {
|
||||
|
||||
const headsStorage = await LevelStorage({ path: headsPath })
|
||||
|
||||
deepStrictEqual((await Entry.decode(await headsStorage.get(hash))).payload, op)
|
||||
const bytes = await headsStorage.get('heads')
|
||||
const heads = JSON.parse(new TextDecoder().decode(bytes))
|
||||
|
||||
strictEqual(heads.length, 1)
|
||||
strictEqual(heads.at(0).hash, hash)
|
||||
strictEqual(heads.at(0).next.length, 1)
|
||||
strictEqual(heads.at(0).next.at(0), entry.next.at(0))
|
||||
|
||||
await headsStorage.close()
|
||||
|
||||
@ -110,23 +130,51 @@ describe('Database', function () {
|
||||
it('uses given MemoryStorage for headsStorage', async () => {
|
||||
const headsStorage = await MemoryStorage()
|
||||
db = await Database({ ipfs, identity: testIdentity, address: databaseId, accessController, directory: './orbitdb', headsStorage })
|
||||
const op = { op: 'PUT', key: 1, value: 'record 1 on db 1' }
|
||||
const hash = await db.addOperation(op)
|
||||
const op1 = { op: 'PUT', key: 1, value: 'record 1 on db 1 version 1' }
|
||||
const op2 = { op: 'PUT', key: 1, value: 'record 1 on db 1 version 2' }
|
||||
|
||||
deepStrictEqual((await Entry.decode(await headsStorage.get(hash))).payload, op)
|
||||
await db.addOperation(op1)
|
||||
const hash = await db.addOperation(op2)
|
||||
const entry = await db.log.get(hash)
|
||||
|
||||
const bytes = await headsStorage.get('heads')
|
||||
const heads = JSON.parse(new TextDecoder().decode(bytes))
|
||||
|
||||
strictEqual(heads.length, 1)
|
||||
strictEqual(heads.at(0).hash, hash)
|
||||
strictEqual(heads.at(0).next.length, 1)
|
||||
strictEqual(heads.at(0).next.at(0), entry.next.at(0))
|
||||
|
||||
await db.close()
|
||||
|
||||
await headsStorage.close()
|
||||
await rimraf('./orbitdb')
|
||||
})
|
||||
|
||||
it('uses given MemoryStorage for entryStorage', async () => {
|
||||
const entryStorage = await MemoryStorage()
|
||||
db = await Database({ ipfs, identity: testIdentity, address: databaseId, accessController, directory: './orbitdb', entryStorage })
|
||||
const op = { op: 'PUT', key: 1, value: 'record 1 on db 1' }
|
||||
const hash = await db.addOperation(op)
|
||||
const headsStorage = await MemoryStorage()
|
||||
db = await Database({ ipfs, identity: testIdentity, address: databaseId, accessController, directory: './orbitdb', headsStorage, entryStorage })
|
||||
const op1 = { op: 'PUT', key: 1, value: 'record 1 on db 1 version 1' }
|
||||
const op2 = { op: 'PUT', key: 1, value: 'record 1 on db 1 version 2' }
|
||||
|
||||
deepStrictEqual((await Entry.decode(await entryStorage.get(hash))).payload, op)
|
||||
await db.addOperation(op1)
|
||||
const hash = await db.addOperation(op2)
|
||||
const entry = await db.log.get(hash)
|
||||
|
||||
const bytes = await headsStorage.get('heads')
|
||||
const heads = JSON.parse(new TextDecoder().decode(bytes))
|
||||
|
||||
strictEqual(heads.length, 1)
|
||||
strictEqual(heads.at(0).hash, hash)
|
||||
strictEqual(heads.at(0).next.length, 1)
|
||||
strictEqual(heads.at(0).next.at(0), entry.next.at(0))
|
||||
|
||||
await db.close()
|
||||
|
||||
await entryStorage.close()
|
||||
await headsStorage.close()
|
||||
await rimraf('./orbitdb')
|
||||
})
|
||||
})
|
||||
|
||||
|
@ -287,7 +287,7 @@ describe('KeyValueIndexed Database', function () {
|
||||
await db.put('key', 'value')
|
||||
|
||||
let result
|
||||
for await (const [key, value] of storage.iterator()) {
|
||||
for await (const { key, value } of db.iterator()) {
|
||||
result = [key, value]
|
||||
}
|
||||
|
||||
|
@ -11,7 +11,7 @@ import createHelia from '../../utils/create-helia.js'
|
||||
const keysPath = './testkeys'
|
||||
|
||||
describe('Documents Database Replication', function () {
|
||||
this.timeout(30000)
|
||||
this.timeout(10000)
|
||||
|
||||
let ipfs1, ipfs2
|
||||
let keystore
|
||||
|
@ -24,7 +24,7 @@ describe('KeyValueIndexed Database Replication', function () {
|
||||
const accessController = {
|
||||
canAppend: async (entry) => {
|
||||
const identity = await identities.getIdentity(entry.identity)
|
||||
return identity.id === testIdentity1.id
|
||||
return identity.id === testIdentity1.id || identity.id === testIdentity2.id
|
||||
}
|
||||
}
|
||||
|
||||
@ -32,6 +32,12 @@ describe('KeyValueIndexed Database Replication', function () {
|
||||
[ipfs1, ipfs2] = await Promise.all([createHelia(), createHelia()])
|
||||
await connectPeers(ipfs1, ipfs2)
|
||||
|
||||
await rimraf(keysPath)
|
||||
await rimraf('./orbitdb1')
|
||||
await rimraf('./orbitdb2')
|
||||
await rimraf('./ipfs1')
|
||||
await rimraf('./ipfs2')
|
||||
|
||||
await copy(testKeysPath, keysPath)
|
||||
keystore = await KeyStore({ path: keysPath })
|
||||
identities = await Identities({ keystore })
|
||||
@ -123,8 +129,8 @@ describe('KeyValueIndexed Database Replication', function () {
|
||||
all2.push(keyValue)
|
||||
}
|
||||
deepStrictEqual(all2.map(e => { return { key: e.key, value: e.value } }), [
|
||||
{ key: 'hello', value: 'friend3' },
|
||||
{ key: 'init', value: true }
|
||||
{ key: 'init', value: true },
|
||||
{ key: 'hello', value: 'friend3' }
|
||||
])
|
||||
|
||||
const all1 = []
|
||||
@ -132,8 +138,8 @@ describe('KeyValueIndexed Database Replication', function () {
|
||||
all1.push(keyValue)
|
||||
}
|
||||
deepStrictEqual(all1.map(e => { return { key: e.key, value: e.value } }), [
|
||||
{ key: 'hello', value: 'friend3' },
|
||||
{ key: 'init', value: true }
|
||||
{ key: 'init', value: true },
|
||||
{ key: 'hello', value: 'friend3' }
|
||||
])
|
||||
})
|
||||
|
||||
@ -196,8 +202,8 @@ describe('KeyValueIndexed Database Replication', function () {
|
||||
all2.push(keyValue)
|
||||
}
|
||||
deepStrictEqual(all2.map(e => { return { key: e.key, value: e.value } }), [
|
||||
{ key: 'hello', value: 'friend3' },
|
||||
{ key: 'init', value: true }
|
||||
{ key: 'init', value: true },
|
||||
{ key: 'hello', value: 'friend3' }
|
||||
])
|
||||
|
||||
const all1 = []
|
||||
@ -205,8 +211,167 @@ describe('KeyValueIndexed Database Replication', function () {
|
||||
all1.push(keyValue)
|
||||
}
|
||||
deepStrictEqual(all1.map(e => { return { key: e.key, value: e.value } }), [
|
||||
{ key: 'hello', value: 'friend3' },
|
||||
{ key: 'init', value: true }
|
||||
{ key: 'init', value: true },
|
||||
{ key: 'hello', value: 'friend3' }
|
||||
])
|
||||
})
|
||||
|
||||
it('indexes the database correctly', async () => {
|
||||
let replicated1 = false
|
||||
let replicated2 = false
|
||||
let replicated3 = false
|
||||
let expectedEntryHash1 = null
|
||||
let expectedEntryHash2 = null
|
||||
let expectedEntryHash3 = null
|
||||
|
||||
const onError = (err) => {
|
||||
console.error(err)
|
||||
deepStrictEqual(err, undefined)
|
||||
}
|
||||
|
||||
const onUpdate = (entry) => {
|
||||
replicated1 = expectedEntryHash1 !== null && entry.hash === expectedEntryHash1
|
||||
}
|
||||
|
||||
kv1 = await KeyValueIndexed()({ ipfs: ipfs1, identity: testIdentity1, address: databaseId, accessController, directory: './orbitdb1' })
|
||||
kv2 = await KeyValueIndexed()({ ipfs: ipfs2, identity: testIdentity2, address: databaseId, accessController, directory: './orbitdb2' })
|
||||
|
||||
kv2.events.on('update', onUpdate)
|
||||
|
||||
kv2.events.on('error', onError)
|
||||
kv1.events.on('error', onError)
|
||||
|
||||
await kv1.set('init', true)
|
||||
await kv1.set('hello', 'friend')
|
||||
await kv1.del('hello')
|
||||
await kv1.set('hello', 'friend2')
|
||||
await kv1.del('hello')
|
||||
await kv1.set('empty', '')
|
||||
await kv1.del('empty')
|
||||
expectedEntryHash1 = await kv1.set('hello', 'friend3')
|
||||
|
||||
await waitFor(() => replicated1, () => true)
|
||||
|
||||
await kv1.close()
|
||||
|
||||
await kv2.set('A', 'AAA')
|
||||
await kv2.set('B', 'BBB')
|
||||
expectedEntryHash3 = await kv2.set('C', 'CCC')
|
||||
|
||||
await kv2.close()
|
||||
|
||||
kv1 = await KeyValueIndexed()({ ipfs: ipfs1, identity: testIdentity1, address: databaseId, accessController, directory: './orbitdb1' })
|
||||
|
||||
const onUpdate3 = async (entry) => {
|
||||
replicated3 = expectedEntryHash3 && entry.hash === expectedEntryHash3
|
||||
}
|
||||
|
||||
kv1.events.on('update', onUpdate3)
|
||||
kv1.events.on('error', onError)
|
||||
|
||||
await kv1.set('one', 1)
|
||||
await kv1.set('two', 2)
|
||||
await kv1.set('three', 3)
|
||||
await kv1.del('three')
|
||||
expectedEntryHash2 = await kv1.set('four', 4)
|
||||
|
||||
kv2 = await KeyValueIndexed()({ ipfs: ipfs2, identity: testIdentity2, address: databaseId, accessController, directory: './orbitdb2' })
|
||||
|
||||
const onUpdate2 = (entry) => {
|
||||
replicated2 = expectedEntryHash2 && entry.hash === expectedEntryHash2
|
||||
}
|
||||
|
||||
kv2.events.on('update', onUpdate2)
|
||||
kv2.events.on('error', onError)
|
||||
|
||||
await waitFor(() => replicated2 && replicated3, () => true)
|
||||
|
||||
const all1 = []
|
||||
for await (const keyValue of kv1.iterator()) {
|
||||
all1.push(keyValue)
|
||||
}
|
||||
|
||||
const all2 = []
|
||||
for await (const keyValue of kv2.iterator()) {
|
||||
all2.push(keyValue)
|
||||
}
|
||||
|
||||
deepStrictEqual(all2.map(e => { return { key: e.key, value: e.value } }), [
|
||||
{ key: 'two', value: 2 },
|
||||
{ key: 'one', value: 1 },
|
||||
{ key: 'init', value: true },
|
||||
{ key: 'hello', value: 'friend3' },
|
||||
{ key: 'four', value: 4 },
|
||||
{ key: 'C', value: 'CCC' },
|
||||
{ key: 'B', value: 'BBB' },
|
||||
{ key: 'A', value: 'AAA' }
|
||||
])
|
||||
|
||||
deepStrictEqual(all1.map(e => { return { key: e.key, value: e.value } }), [
|
||||
{ key: 'two', value: 2 },
|
||||
{ key: 'one', value: 1 },
|
||||
{ key: 'init', value: true },
|
||||
{ key: 'hello', value: 'friend3' },
|
||||
{ key: 'four', value: 4 },
|
||||
{ key: 'C', value: 'CCC' },
|
||||
{ key: 'B', value: 'BBB' },
|
||||
{ key: 'A', value: 'AAA' }
|
||||
])
|
||||
})
|
||||
|
||||
it('indexes deletes correctly', async () => {
|
||||
const databaseId = 'kv-CCC'
|
||||
let replicated = false
|
||||
let err
|
||||
|
||||
const onError = (error) => {
|
||||
err = error
|
||||
}
|
||||
|
||||
kv1 = await KeyValueIndexed()({ ipfs: ipfs1, identity: testIdentity1, address: databaseId, accessController, directory: './orbitdb11' })
|
||||
|
||||
kv1.events.on('error', onError)
|
||||
|
||||
await kv1.set('init', true)
|
||||
await kv1.set('hello', 'friend')
|
||||
await kv1.del('delete')
|
||||
await kv1.set('delete', 'this value')
|
||||
await kv1.del('delete')
|
||||
|
||||
kv2 = await KeyValueIndexed()({ ipfs: ipfs2, identity: testIdentity2, address: databaseId, accessController, directory: './orbitdb22' })
|
||||
|
||||
const onConnected = (entry) => {
|
||||
replicated = true
|
||||
}
|
||||
|
||||
kv2.events.on('join', onConnected)
|
||||
kv2.events.on('error', onError)
|
||||
|
||||
await waitFor(() => replicated, () => true)
|
||||
|
||||
const all1 = []
|
||||
for await (const keyValue of kv1.iterator()) {
|
||||
all1.push(keyValue)
|
||||
}
|
||||
|
||||
const all2 = []
|
||||
for await (const keyValue of kv2.iterator()) {
|
||||
all2.push(keyValue)
|
||||
}
|
||||
|
||||
deepStrictEqual(err, undefined)
|
||||
|
||||
deepStrictEqual(all2.map(e => { return { key: e.key, value: e.value } }), [
|
||||
{ key: 'init', value: true },
|
||||
{ key: 'hello', value: 'friend' }
|
||||
])
|
||||
|
||||
deepStrictEqual(all1.map(e => { return { key: e.key, value: e.value } }), [
|
||||
{ key: 'init', value: true },
|
||||
{ key: 'hello', value: 'friend' }
|
||||
])
|
||||
|
||||
await rimraf('./orbitdb11')
|
||||
await rimraf('./orbitdb22')
|
||||
})
|
||||
})
|
||||
|
@ -38,7 +38,7 @@ describe('Identities', function () {
|
||||
identities = await Identities({ path: keysPath })
|
||||
identity = await identities.createIdentity({ id })
|
||||
const key = await identities.keystore.getKey(id)
|
||||
const externalId = uint8ArrayToString(key.public.marshal(), 'base16')
|
||||
const externalId = uint8ArrayToString(key.publicKey.raw, 'base16')
|
||||
assert.strictEqual(identity.id, externalId)
|
||||
})
|
||||
})
|
||||
@ -106,7 +106,7 @@ describe('Identities', function () {
|
||||
identity = await identities.createIdentity({ id })
|
||||
keystore = identities.keystore
|
||||
const key = await keystore.getKey(id)
|
||||
const externalId = uint8ArrayToString(key.public.marshal(), 'base16')
|
||||
const externalId = uint8ArrayToString(key.publicKey.raw, 'base16')
|
||||
assert.strictEqual(identity.id, externalId)
|
||||
})
|
||||
|
||||
@ -117,7 +117,7 @@ describe('Identities', function () {
|
||||
|
||||
it('has the correct public key', async () => {
|
||||
const key = await keystore.getKey(id)
|
||||
const externalId = uint8ArrayToString(key.public.marshal(), 'base16')
|
||||
const externalId = uint8ArrayToString(key.publicKey.raw, 'base16')
|
||||
const signingKey = await keystore.getKey(externalId)
|
||||
assert.notStrictEqual(signingKey, undefined)
|
||||
assert.strictEqual(identity.publicKey, keystore.getPublic(signingKey))
|
||||
@ -125,10 +125,10 @@ describe('Identities', function () {
|
||||
|
||||
it('has a signature for the id', async () => {
|
||||
const key = await keystore.getKey(id)
|
||||
const externalId = uint8ArrayToString(key.public.marshal(), 'base16')
|
||||
const externalId = uint8ArrayToString(key.publicKey.raw, 'base16')
|
||||
const signingKey = await keystore.getKey(externalId)
|
||||
const idSignature = await signMessage(signingKey, externalId)
|
||||
const publicKey = uint8ArrayToString(signingKey.public.marshal(), 'base16')
|
||||
const publicKey = uint8ArrayToString(signingKey.publicKey.raw, 'base16')
|
||||
const verifies = await verifyMessage(idSignature, publicKey, externalId)
|
||||
assert.strictEqual(verifies, true)
|
||||
assert.strictEqual(identity.signatures.id, idSignature)
|
||||
@ -136,7 +136,7 @@ describe('Identities', function () {
|
||||
|
||||
it('has a signature for the publicKey', async () => {
|
||||
const key = await keystore.getKey(id)
|
||||
const externalId = uint8ArrayToString(key.public.marshal(), 'base16')
|
||||
const externalId = uint8ArrayToString(key.publicKey.raw, 'base16')
|
||||
const signingKey = await keystore.getKey(externalId)
|
||||
const idSignature = await signMessage(signingKey, externalId)
|
||||
const externalKey = await keystore.getKey(id)
|
||||
@ -171,7 +171,7 @@ describe('Identities', function () {
|
||||
|
||||
it('has the correct id', async () => {
|
||||
const key = await savedKeysKeyStore.getKey(id)
|
||||
assert.strictEqual(identity.id, uint8ArrayToString(key.public.marshal(), 'base16'))
|
||||
assert.strictEqual(identity.id, uint8ArrayToString(key.publicKey.raw, 'base16'))
|
||||
})
|
||||
|
||||
it('has the correct public key', async () => {
|
||||
|
@ -145,7 +145,7 @@ describe('KeyStore', () => {
|
||||
})
|
||||
|
||||
describe('Options', () => {
|
||||
const unmarshal = crypto.keys.supportedKeys.secp256k1.unmarshalSecp256k1PrivateKey
|
||||
const unmarshal = crypto.keys.privateKeyFromRaw
|
||||
const privateKey = '198594a8de39fd97017d11996d619b3746211605a9d290964badf58bc79bdb33'
|
||||
const publicKey = '0260baeaffa1de1e4135e5b395e0380563a622b9599d1b8e012a0f7603f516bdaa'
|
||||
let privateKeyBuffer, publicKeyBuffer, unmarshalledPrivateKey
|
||||
|
@ -33,7 +33,8 @@ describe('Entry', function () {
|
||||
it('creates a an empty entry', async () => {
|
||||
const expectedHash = 'zdpuAsKzwUEa8cz9pkJxxFMxLuP3cutA9PDGoLZytrg4RSVEa'
|
||||
const entry = await create(testIdentity, 'A', 'hello')
|
||||
strictEqual(entry.hash, expectedHash)
|
||||
const { hash } = await Entry.encode(entry)
|
||||
strictEqual(hash, expectedHash)
|
||||
strictEqual(entry.id, 'A')
|
||||
strictEqual(entry.clock.id, testIdentity.publicKey)
|
||||
strictEqual(entry.clock.time, 0)
|
||||
@ -47,7 +48,8 @@ describe('Entry', function () {
|
||||
const expectedHash = 'zdpuAmthfqpHRQjdSpKN5etr1GrreJb7QcU1Hshm6pERnzsxi'
|
||||
const payload = 'hello world'
|
||||
const entry = await create(testIdentity, 'A', payload)
|
||||
strictEqual(entry.hash, expectedHash)
|
||||
const { hash } = await Entry.encode(entry)
|
||||
strictEqual(hash, expectedHash)
|
||||
strictEqual(entry.payload, payload)
|
||||
strictEqual(entry.id, 'A')
|
||||
strictEqual(entry.clock.id, testIdentity.publicKey)
|
||||
@ -81,7 +83,7 @@ describe('Entry', function () {
|
||||
const payload2 = 'hello again'
|
||||
const entry1 = await create(testIdentity, 'A', payload1)
|
||||
entry1.clock = tickClock(entry1.clock)
|
||||
const entry2 = await create(testIdentity, 'A', payload2, entry1.clock, [entry1])
|
||||
const entry2 = await create(testIdentity, 'A', payload2, null, entry1.clock, [entry1])
|
||||
strictEqual(entry2.payload, payload2)
|
||||
strictEqual(entry2.next.length, 1)
|
||||
// strictEqual(entry2.hash, expectedHash)
|
||||
@ -91,7 +93,8 @@ describe('Entry', function () {
|
||||
|
||||
it('`next` parameter can be an array of strings', async () => {
|
||||
const entry1 = await create(testIdentity, 'A', 'hello1')
|
||||
const entry2 = await create(testIdentity, 'A', 'hello2', null, [entry1.hash])
|
||||
const { hash } = await Entry.encode(entry1)
|
||||
const entry2 = await create(testIdentity, 'A', 'hello2', null, null, [hash])
|
||||
strictEqual(typeof entry2.next[0] === 'string', true)
|
||||
})
|
||||
|
||||
@ -138,7 +141,7 @@ describe('Entry', function () {
|
||||
it('throws an error if next is not an array', async () => {
|
||||
let err
|
||||
try {
|
||||
await create(testIdentity, 'A', 'hello', null, {})
|
||||
await create(testIdentity, 'A', 'hello', null, null, {})
|
||||
} catch (e) {
|
||||
err = e
|
||||
}
|
||||
|
@ -1,7 +1,7 @@
|
||||
import { strictEqual, notStrictEqual, deepStrictEqual } from 'assert'
|
||||
import { rimraf } from 'rimraf'
|
||||
import { copy } from 'fs-extra'
|
||||
import { Log, Entry, Identities, KeyStore } from '../../src/index.js'
|
||||
import { Log, Identities, KeyStore } from '../../src/index.js'
|
||||
import { Clock } from '../../src/oplog/log.js'
|
||||
import { MemoryStorage } from '../../src/storage/index.js'
|
||||
import testKeysPath from '../fixtures/test-keys-path.js'
|
||||
@ -760,7 +760,7 @@ describe('Log - Join', async function () {
|
||||
|
||||
await log1.storage.merge(log0.storage)
|
||||
|
||||
await headsStorage1.put(e0.hash, e0.bytes)
|
||||
await headsStorage1.put('heads', new TextEncoder().encode(JSON.stringify([{ hash: e0.hash, next: e0.next }])))
|
||||
|
||||
await log1.append('hello1')
|
||||
await log1.append('hello2')
|
||||
@ -863,7 +863,7 @@ describe('Log - Join', async function () {
|
||||
})
|
||||
|
||||
describe('throws an error if verification of an entry in given entry\'s history fails', async () => {
|
||||
let e1, e3
|
||||
let e1
|
||||
let headsStorage1, headsStorage2
|
||||
|
||||
before(async () => {
|
||||
@ -875,23 +875,19 @@ describe('Log - Join', async function () {
|
||||
|
||||
e1 = await log1.append('hello1')
|
||||
await log1.append('hello2')
|
||||
e3 = await log1.append('hello3')
|
||||
await log1.append('hello3')
|
||||
})
|
||||
|
||||
it('throws an error if an entry doesn\'t have a payload field', async () => {
|
||||
const e = Object.assign({}, e1)
|
||||
delete e.payload
|
||||
|
||||
delete e.bytes
|
||||
delete e.hash
|
||||
const ee = await Entry.encode(e)
|
||||
|
||||
await headsStorage1.put(e1.hash, ee.bytes)
|
||||
await headsStorage1.put('heads', JSON.stringify([{ hash: e1.hash, next: e1.next }]))
|
||||
await log2.storage.merge(headsStorage1)
|
||||
|
||||
let err
|
||||
try {
|
||||
await log2.joinEntry(e3)
|
||||
await log2.joinEntry(e)
|
||||
} catch (e) {
|
||||
err = e
|
||||
}
|
||||
@ -906,16 +902,12 @@ describe('Log - Join', async function () {
|
||||
const e = Object.assign({}, e1)
|
||||
delete e.key
|
||||
|
||||
delete e.bytes
|
||||
delete e.hash
|
||||
const ee = await Entry.encode(e)
|
||||
|
||||
await headsStorage1.put(e1.hash, ee.bytes)
|
||||
await headsStorage1.put('heads', JSON.stringify([{ hash: e1.hash, next: e1.next }]))
|
||||
await log2.storage.merge(headsStorage1)
|
||||
|
||||
let err
|
||||
try {
|
||||
await log2.joinEntry(e3)
|
||||
await log2.joinEntry(e)
|
||||
} catch (e) {
|
||||
err = e
|
||||
}
|
||||
@ -930,16 +922,12 @@ describe('Log - Join', async function () {
|
||||
const e = Object.assign({}, e1)
|
||||
delete e.sig
|
||||
|
||||
delete e.bytes
|
||||
delete e.hash
|
||||
const ee = await Entry.encode(e)
|
||||
|
||||
await headsStorage1.put(e1.hash, ee.bytes)
|
||||
await headsStorage1.put('heads', JSON.stringify([{ hash: e1.hash, next: e1.next }]))
|
||||
await log2.storage.merge(headsStorage1)
|
||||
|
||||
let err
|
||||
try {
|
||||
await log2.joinEntry(e3)
|
||||
await log2.joinEntry(e)
|
||||
} catch (e) {
|
||||
err = e
|
||||
}
|
||||
@ -953,22 +941,19 @@ describe('Log - Join', async function () {
|
||||
it('throws an error if an entry signature doesn\'t verify', async () => {
|
||||
const e = Object.assign({}, e1)
|
||||
e.sig = '1234567890'
|
||||
delete e.bytes
|
||||
delete e.hash
|
||||
const ee = await Entry.encode(e)
|
||||
|
||||
await headsStorage1.put(e1.hash, ee.bytes)
|
||||
await headsStorage1.put('heads', JSON.stringify([{ hash: e1.hash, next: e1.next }]))
|
||||
await log2.storage.merge(headsStorage1)
|
||||
|
||||
let err
|
||||
try {
|
||||
await log2.joinEntry(e3)
|
||||
await log2.joinEntry(e)
|
||||
} catch (e) {
|
||||
err = e
|
||||
}
|
||||
|
||||
notStrictEqual(err, undefined)
|
||||
strictEqual(err.message, 'Could not validate signature for entry "zdpuAvkAJ8C46cnGdtFpcBratA5MqK7CcjqCJjjmuKuFvZir3"')
|
||||
strictEqual(err.message, 'Could not validate signature for entry "zdpuAxyE4ScWLf4X6VvkhMrpDQvwdvQno1DhzY5p1U3GPHrBT"')
|
||||
deepStrictEqual(await log2.all(), [])
|
||||
deepStrictEqual(await log2.heads(), [])
|
||||
})
|
||||
|
@ -60,15 +60,21 @@ describe('Log', function () {
|
||||
})
|
||||
|
||||
it('sets one head if multiple are given as params', async () => {
|
||||
const one = await create(testIdentity, 'A', 'entryA', null, [])
|
||||
const two = await create(testIdentity, 'A', 'entryB', null, [one.hash])
|
||||
const three = await create(testIdentity, 'A', 'entryC', null, [two.hash])
|
||||
const four = await create(testIdentity, 'A', 'entryD', null, [two.hash])
|
||||
const one = await create(testIdentity, 'A', 'entryA', null, null, [])
|
||||
const { hash: hash1, bytes: bytes1 } = await Entry.encode(one)
|
||||
const two = await create(testIdentity, 'A', 'entryB', null, null, [hash1])
|
||||
const { hash: hash2, bytes: bytes2 } = await Entry.encode(two)
|
||||
const three = await create(testIdentity, 'A', 'entryC', null, null, [hash2])
|
||||
const { hash: hash3, bytes: bytes3 } = await Entry.encode(three)
|
||||
const four = await create(testIdentity, 'A', 'entryD', null, null, [hash3])
|
||||
const { hash: hash4, bytes: bytes4 } = await Entry.encode(four)
|
||||
const entryStorage = await MemoryStorage()
|
||||
await entryStorage.put(one.hash, one.bytes)
|
||||
await entryStorage.put(two.hash, two.bytes)
|
||||
await entryStorage.put(three.hash, three.bytes)
|
||||
await entryStorage.put(four.hash, four.bytes)
|
||||
await entryStorage.put(hash1, bytes1)
|
||||
await entryStorage.put(hash2, bytes2)
|
||||
await entryStorage.put(hash3, bytes3)
|
||||
await entryStorage.put(hash4, bytes4)
|
||||
three.hash = hash3
|
||||
two.hash = hash2
|
||||
const log = await Log(testIdentity, { logId: 'A', logHeads: [three, three, two, two], entryStorage })
|
||||
const values = await log.values()
|
||||
const heads = await log.heads()
|
||||
@ -78,15 +84,22 @@ describe('Log', function () {
|
||||
})
|
||||
|
||||
it('sets two heads if two given as params', async () => {
|
||||
const one = await create(testIdentity, 'A', 'entryA', null, [])
|
||||
const two = await create(testIdentity, 'A', 'entryB', null, [one.hash])
|
||||
const three = await create(testIdentity, 'A', 'entryC', null, [two.hash])
|
||||
const four = await create(testIdentity, 'A', 'entryD', null, [two.hash])
|
||||
const one = await create(testIdentity, 'A', 'entryA', null, null, [])
|
||||
const { hash: hash1, bytes: bytes1 } = await Entry.encode(one)
|
||||
const two = await create(testIdentity, 'A', 'entryB', null, null, [hash1])
|
||||
const { hash: hash2, bytes: bytes2 } = await Entry.encode(two)
|
||||
const three = await create(testIdentity, 'A', 'entryC', null, null, [hash2])
|
||||
const { hash: hash3, bytes: bytes3 } = await Entry.encode(three)
|
||||
const four = await create(testIdentity, 'A', 'entryD', null, null, [hash2])
|
||||
const { hash: hash4, bytes: bytes4 } = await Entry.encode(four)
|
||||
const entryStorage = await MemoryStorage()
|
||||
await entryStorage.put(one.hash, one.bytes)
|
||||
await entryStorage.put(two.hash, two.bytes)
|
||||
await entryStorage.put(three.hash, three.bytes)
|
||||
await entryStorage.put(four.hash, four.bytes)
|
||||
await entryStorage.put(hash1, bytes1)
|
||||
await entryStorage.put(hash2, bytes2)
|
||||
await entryStorage.put(hash3, bytes3)
|
||||
await entryStorage.put(hash4, bytes4)
|
||||
three.hash = hash3
|
||||
four.hash = hash4
|
||||
two.hash = hash2
|
||||
const log = await Log(testIdentity, { logId: 'A', logHeads: [three, four, two], entryStorage })
|
||||
const values = await log.values()
|
||||
const heads = await log.heads()
|
||||
|
@ -69,7 +69,7 @@ describe('Log - Replication', function () {
|
||||
try {
|
||||
if (!messageIsFromMe(message)) {
|
||||
const entry = await Entry.decode(message.detail.data)
|
||||
await storage1.put(entry.hash, entry.bytes)
|
||||
await storage1.put(entry.hash, message.detail.data)
|
||||
await log1.joinEntry(entry)
|
||||
}
|
||||
} catch (e) {
|
||||
@ -83,7 +83,7 @@ describe('Log - Replication', function () {
|
||||
try {
|
||||
if (!messageIsFromMe(message)) {
|
||||
const entry = await Entry.decode(message.detail.data)
|
||||
await storage2.put(entry.hash, entry.bytes)
|
||||
await storage2.put(entry.hash, message.detail.data)
|
||||
await log2.joinEntry(entry)
|
||||
}
|
||||
} catch (e) {
|
||||
@ -114,8 +114,10 @@ describe('Log - Replication', function () {
|
||||
for (let i = 1; i <= amount; i++) {
|
||||
const entry1 = await input1.append('A' + i)
|
||||
const entry2 = await input2.append('B' + i)
|
||||
await ipfs1.libp2p.services.pubsub.publish(logId, entry1.bytes)
|
||||
await ipfs2.libp2p.services.pubsub.publish(logId, entry2.bytes)
|
||||
const bytes1 = await input1.storage.get(entry1.hash)
|
||||
const bytes2 = await input1.storage.get(entry2.hash)
|
||||
await ipfs1.libp2p.services.pubsub.publish(logId, bytes1)
|
||||
await ipfs2.libp2p.services.pubsub.publish(logId, bytes2)
|
||||
}
|
||||
|
||||
console.log('Messages sent')
|
||||
|
376  test/orbitdb-encryption.test.js  Normal file
@ -0,0 +1,376 @@
|
||||
import { strictEqual, notEqual } from 'assert'
|
||||
import { rimraf } from 'rimraf'
|
||||
import path from 'path'
|
||||
import { createOrbitDB } from '../src/index.js'
|
||||
import connectPeers from './utils/connect-nodes.js'
|
||||
import waitFor from './utils/wait-for.js'
|
||||
import createHelia from './utils/create-helia.js'
|
||||
|
||||
import * as Block from 'multiformats/block'
|
||||
import * as dagCbor from '@ipld/dag-cbor'
|
||||
import { sha256 } from 'multiformats/hashes/sha2'
|
||||
|
||||
import SimpleEncryption from '@orbitdb/simple-encryption'
|
||||
|
||||
const codec = dagCbor
|
||||
const hasher = sha256
|
||||
|
||||
const dbPath = './orbitdb/tests/write-permissions'
|
||||
|
||||
describe('Encryption', function () {
|
||||
this.timeout(5000)
|
||||
|
||||
let ipfs1, ipfs2
|
||||
let orbitdb1, orbitdb2
|
||||
let db1, db2
|
||||
|
||||
let replicationEncryption
|
||||
let dataEncryption
|
||||
|
||||
before(async () => {
|
||||
[ipfs1, ipfs2] = await Promise.all([createHelia(), createHelia()])
|
||||
await connectPeers(ipfs1, ipfs2)
|
||||
|
||||
await rimraf('./orbitdb')
|
||||
|
||||
orbitdb1 = await createOrbitDB({ ipfs: ipfs1, id: 'user1', directory: path.join(dbPath, '1') })
|
||||
orbitdb2 = await createOrbitDB({ ipfs: ipfs2, id: 'user2', directory: path.join(dbPath, '2') })
|
||||
|
||||
replicationEncryption = await SimpleEncryption({ password: 'hello' })
|
||||
dataEncryption = await SimpleEncryption({ password: 'world' })
|
||||
})
|
||||
|
||||
after(async () => {
|
||||
if (orbitdb1) {
|
||||
await orbitdb1.stop()
|
||||
}
|
||||
|
||||
if (orbitdb2) {
|
||||
await orbitdb2.stop()
|
||||
}
|
||||
|
||||
if (ipfs1) {
|
||||
await ipfs1.stop()
|
||||
}
|
||||
|
||||
if (ipfs2) {
|
||||
await ipfs2.stop()
|
||||
}
|
||||
|
||||
await rimraf('./orbitdb')
|
||||
await rimraf('./ipfs1')
|
||||
await rimraf('./ipfs2')
|
||||
})
|
||||
|
||||
describe('Data is encrypted when replicated to peers', async () => {
|
||||
afterEach(async () => {
|
||||
if (db1) {
|
||||
await db1.drop()
|
||||
await db1.close()
|
||||
}
|
||||
if (db2) {
|
||||
await db2.drop()
|
||||
await db2.close()
|
||||
}
|
||||
})
|
||||
|
||||
it('encrypts/decrypts data', async () => {
|
||||
let connected = false
|
||||
let updated = false
|
||||
let error = false
|
||||
|
||||
const encryption = {
|
||||
data: dataEncryption
|
||||
}
|
||||
|
||||
db1 = await orbitdb1.open('encryption-test-1', { encryption })
|
||||
db2 = await orbitdb2.open(db1.address, { encryption })
|
||||
|
||||
const onJoin = async (peerId, heads) => {
|
||||
connected = true
|
||||
}
|
||||
db2.events.on('join', onJoin)
|
||||
|
||||
await waitFor(() => connected, () => true)
|
||||
|
||||
const onUpdate = async (peerId, heads) => {
|
||||
updated = true
|
||||
}
|
||||
db2.events.on('update', onUpdate)
|
||||
|
||||
const onError = async (err) => {
|
||||
// Catch "Could not decrypt entry" errors
|
||||
console.log(err)
|
||||
error = true
|
||||
}
|
||||
db2.events.on('error', onError)
|
||||
|
||||
const hash1 = await db1.add('record 1')
|
||||
const hash2 = await db1.add('record 2')
|
||||
|
||||
strictEqual(await db1.get(hash1), 'record 1')
|
||||
strictEqual(await db1.get(hash2), 'record 2')
|
||||
|
||||
await waitFor(() => updated || error, () => true)
|
||||
|
||||
const all = await db2.all()
|
||||
|
||||
strictEqual(all.length, 2)
|
||||
strictEqual(all[0].value, 'record 1')
|
||||
strictEqual(all[1].value, 'record 2')
|
||||
})
|
||||
|
||||
it('encrypts/decrypts log', async () => {
|
||||
let connected = false
|
||||
let updated = false
|
||||
let error = false
|
||||
|
||||
const encryption = {
|
||||
replication: replicationEncryption
|
||||
}
|
||||
|
||||
db1 = await orbitdb1.open('encryption-test-1', { encryption })
|
||||
db2 = await orbitdb2.open(db1.address, { encryption })
|
||||
|
||||
const onJoin = async (peerId, heads) => {
|
||||
connected = true
|
||||
}
|
||||
db2.events.on('join', onJoin)
|
||||
|
||||
await waitFor(() => connected, () => true)
|
||||
|
||||
const onUpdate = async (peerId, heads) => {
|
||||
updated = true
|
||||
}
|
||||
db2.events.on('update', onUpdate)
|
||||
|
||||
const onError = async (err) => {
|
||||
// Catch "Could not decrypt entry" errors
|
||||
console.log(err)
|
||||
error = true
|
||||
}
|
||||
db2.events.on('error', onError)
|
||||
|
||||
const hash1 = await db1.add('record 1')
|
||||
const hash2 = await db1.add('record 2')
|
||||
|
||||
strictEqual(await db1.get(hash1), 'record 1')
|
||||
strictEqual(await db1.get(hash2), 'record 2')
|
||||
|
||||
await waitFor(() => updated || error, () => true)
|
||||
|
||||
const all = await db2.all()
|
||||
|
||||
strictEqual(all.length, 2)
|
||||
strictEqual(all[0].value, 'record 1')
|
||||
strictEqual(all[1].value, 'record 2')
|
||||
})
|
||||
|
||||
it('encrypts/decrypts log and data', async () => {
|
||||
let connected = false
|
||||
let updated = false
|
||||
let error = false
|
||||
|
||||
const encryption = {
|
||||
replication: replicationEncryption,
|
||||
data: dataEncryption
|
||||
}
|
||||
|
||||
db1 = await orbitdb1.open('encryption-test-1', { encryption })
|
||||
db2 = await orbitdb2.open(db1.address, { encryption })
|
||||
|
||||
const onJoin = async (peerId, heads) => {
|
||||
connected = true
|
||||
}
|
||||
db2.events.on('join', onJoin)
|
||||
|
||||
await waitFor(() => connected, () => true)
|
||||
|
||||
const onUpdate = async (peerId, heads) => {
|
||||
updated = true
|
||||
}
|
||||
db2.events.on('update', onUpdate)
|
||||
|
||||
const onError = async (err) => {
|
||||
// Catch "Could not decrypt entry" errors
|
||||
console.log(err)
|
||||
error = true
|
||||
}
|
||||
db2.events.on('error', onError)
|
||||
|
||||
const hash1 = await db1.add('record 1')
|
||||
const hash2 = await db1.add('record 2')
|
||||
|
||||
strictEqual(await db1.get(hash1), 'record 1')
|
||||
strictEqual(await db1.get(hash2), 'record 2')
|
||||
|
||||
await waitFor(() => updated || error, () => true)
|
||||
|
||||
const all = await db2.all()
|
||||
|
||||
strictEqual(all.length, 2)
|
||||
strictEqual(all[0].value, 'record 1')
|
||||
strictEqual(all[1].value, 'record 2')
|
||||
})
|
||||
|
||||
it('throws an error if log can\'t be decrypted', async () => {
|
||||
let connected = false
|
||||
let hasError = false
|
||||
let error
|
||||
|
||||
const replicationEncryptionWithFailure = await SimpleEncryption({ password: 'goodbye' })
|
||||
|
||||
const encryption = {
|
||||
replication: replicationEncryption
|
||||
}
|
||||
|
||||
const encryptionWithFailure = {
|
||||
replication: replicationEncryptionWithFailure
|
||||
}
|
||||
|
||||
db1 = await orbitdb1.open('encryption-test-1', { encryption })
|
||||
db2 = await orbitdb2.open(db1.address, { encryption: encryptionWithFailure })
|
||||
|
||||
const onJoin = async (peerId, heads) => {
|
||||
connected = true
|
||||
}
|
||||
db2.events.on('join', onJoin)
|
||||
|
||||
await waitFor(() => connected, () => true)
|
||||
|
||||
const onError = async (err) => {
|
||||
// Catch "Could not decrypt entry" errors
|
||||
error = err
|
||||
hasError = true
|
||||
}
|
||||
db2.events.on('error', onError)
|
||||
|
||||
await db1.add('record 1')
|
||||
|
||||
await waitFor(() => hasError, () => true)
|
||||
|
||||
strictEqual(error.message, 'Could not decrypt entry')
|
||||
|
||||
const all = await db2.all()
|
||||
|
||||
strictEqual(all.length, 0)
|
||||
})
|
||||
|
||||
it('throws an error if data can\'t be decrypted', async () => {
|
||||
let connected = false
|
||||
let hasError = false
|
||||
let error
|
||||
|
||||
const dataEncryptionWithFailure = await SimpleEncryption({ password: 'goodbye' })
|
||||
|
||||
const encryption = {
|
||||
data: dataEncryption
|
||||
}
|
||||
|
||||
const encryptionWithFailure = {
|
||||
data: dataEncryptionWithFailure
|
||||
}
|
||||
|
||||
db1 = await orbitdb1.open('encryption-test-1', { encryption })
|
||||
db2 = await orbitdb2.open(db1.address, { encryption: encryptionWithFailure })
|
||||
|
||||
const onJoin = async (peerId, heads) => {
|
||||
connected = true
|
||||
}
|
||||
db2.events.on('join', onJoin)
|
||||
|
||||
await waitFor(() => connected, () => true)
|
||||
|
||||
const onError = async (err) => {
|
||||
// Catch "Could not decrypt entry" errors
|
||||
error = err
|
||||
hasError = true
|
||||
}
|
||||
db2.events.on('error', onError)
|
||||
|
||||
await db1.add('record 1')
|
||||
|
||||
await waitFor(() => hasError, () => true)
|
||||
|
||||
strictEqual(error.message, 'Could not decrypt payload')
|
||||
|
||||
const all = await db2.all()
|
||||
|
||||
strictEqual(all.length, 0)
|
||||
})
|
||||
})
|
||||
|
||||
describe('Data is encrypted in storage', async () => {
|
||||
afterEach(async () => {
|
||||
if (db1) {
|
||||
await db1.drop()
|
||||
await db1.close()
|
||||
}
|
||||
})
|
||||
|
||||
it('payload bytes are encrypted in storage', async () => {
|
||||
let error
|
||||
|
||||
const encryption = {
|
||||
data: dataEncryption
|
||||
}
|
||||
|
||||
db1 = await orbitdb1.open('encryption-test-1', { encryption })
|
||||
|
||||
const onError = async (err) => {
|
||||
// Catch "Could not decrypt entry" errors
|
||||
console.log(err)
|
||||
error = true
|
||||
}
|
||||
db1.events.on('error', onError)
|
||||
|
||||
const hash1 = await db1.add('record 1')
|
||||
|
||||
const bytes = await db1.log.storage.get(hash1)
|
||||
const { value } = await Block.decode({ bytes, codec, hasher })
|
||||
const payload = value.payload
|
||||
|
||||
strictEqual(payload.constructor, Uint8Array)
|
||||
|
||||
try {
|
||||
await Block.decode({ bytes: payload, codec, hasher })
|
||||
} catch (e) {
|
||||
error = e
|
||||
}
|
||||
|
||||
strictEqual(error.message.startsWith('CBOR decode error'), true)
|
||||
})
|
||||
|
||||
it('entry bytes are encrypted in storage', async () => {
|
||||
let error
|
||||
|
||||
const encryption = {
|
||||
replication: replicationEncryption
|
||||
}
|
||||
|
||||
db1 = await orbitdb1.open('encryption-test-1', { encryption })
|
||||
|
||||
const onError = async (err) => {
|
||||
// Catch "Could not decrypt entry" errors
|
||||
console.log(err)
|
||||
error = true
|
||||
}
|
||||
db1.events.on('error', onError)
|
||||
|
||||
const hash1 = await db1.add('record 1')
|
||||
let decodedBytes
|
||||
|
||||
try {
|
||||
const bytes = await db1.log.storage.get(hash1)
|
||||
decodedBytes = await Block.decode({ bytes, codec, hasher })
|
||||
await Block.decode({ bytes: decodedBytes, codec, hasher })
|
||||
} catch (e) {
|
||||
error = e
|
||||
}
|
||||
|
||||
notEqual(error, undefined)
|
||||
strictEqual(error.message.startsWith('CBOR decode error'), true)
|
||||
strictEqual(decodedBytes.value.constructor, Uint8Array)
|
||||
})
|
||||
})
|
||||
})
|
@ -2,7 +2,7 @@ import { deepStrictEqual, strictEqual, notStrictEqual } from 'assert'
|
||||
import { rimraf } from 'rimraf'
|
||||
import fs from 'fs'
|
||||
import path from 'path'
|
||||
import { createOrbitDB, isValidAddress, LevelStorage } from '../src/index.js'
|
||||
import { createOrbitDB, isValidAddress } from '../src/index.js'
|
||||
import KeyValueIndexed from '../src/databases/keyvalue-indexed.js'
|
||||
import connectPeers from './utils/connect-nodes.js'
|
||||
import waitFor from './utils/wait-for.js'
|
||||
@ -419,14 +419,13 @@ describe('Open databases', function () {
|
||||
})
|
||||
|
||||
describe('opening an indexed keyvalue database', () => {
|
||||
let storage
|
||||
let db, address
|
||||
|
||||
const amount = 10
|
||||
|
||||
before(async () => {
|
||||
orbitdb1 = await createOrbitDB({ ipfs: ipfs1, id: 'user1' })
|
||||
db = await orbitdb1.open('helloworld', { type: 'keyvalue' })
|
||||
db = await orbitdb1.open('helloworld', { Database: KeyValueIndexed() })
|
||||
address = db.address
|
||||
|
||||
for (let i = 0; i < amount; i++) {
|
||||
@ -437,9 +436,6 @@ describe('Open databases', function () {
|
||||
})
|
||||
|
||||
after(async () => {
|
||||
if (storage) {
|
||||
await storage.close()
|
||||
}
|
||||
if (db) {
|
||||
await db.close()
|
||||
}
|
||||
@ -451,8 +447,7 @@ describe('Open databases', function () {
|
||||
})
|
||||
|
||||
it('returns all entries in the database and in the index', async () => {
|
||||
storage = await LevelStorage({ path: './index', valueEncoding: 'json' })
|
||||
db = await orbitdb1.open(address, { Database: KeyValueIndexed({ storage }) })
|
||||
db = await orbitdb1.open(address, { Database: KeyValueIndexed() })
|
||||
|
||||
strictEqual(db.address, address)
|
||||
strictEqual(db.type, 'keyvalue')
|
||||
@ -464,18 +459,11 @@ describe('Open databases', function () {
|
||||
}
|
||||
|
||||
const result = []
|
||||
for await (const [key, value] of storage.iterator()) {
|
||||
result.push({ key, value })
|
||||
for await (const { key, value } of db.iterator()) {
|
||||
result.unshift({ key, value })
|
||||
}
|
||||
|
||||
deepStrictEqual(result, expected)
|
||||
|
||||
const all = []
|
||||
for await (const { key, value } of db.iterator()) {
|
||||
all.unshift({ key, value })
|
||||
}
|
||||
|
||||
deepStrictEqual(all, expected)
|
||||
})
|
||||
})
|
||||
|
||||
|
@ -1,9 +1,11 @@
|
||||
import { deepStrictEqual } from 'assert'
|
||||
import { deepStrictEqual, strictEqual } from 'assert'
|
||||
import { rimraf } from 'rimraf'
|
||||
import { createOrbitDB } from '../src/index.js'
|
||||
import connectPeers from './utils/connect-nodes.js'
|
||||
import waitFor from './utils/wait-for.js'
|
||||
import createHelia from './utils/create-helia.js'
|
||||
import { CID } from 'multiformats/cid'
|
||||
import { base58btc } from 'multiformats/bases/base58'
|
||||
|
||||
describe('Replicating databases', function () {
|
||||
this.timeout(10000)
|
||||
@ -23,8 +25,8 @@ describe('Replicating databases', function () {
|
||||
after(async () => {
|
||||
await orbitdb1.stop()
|
||||
await orbitdb2.stop()
|
||||
await ipfs1.blockstore.child.child.close()
|
||||
await ipfs2.blockstore.child.child.close()
|
||||
await ipfs1.blockstore.child.child.child.close()
|
||||
await ipfs2.blockstore.child.child.child.close()
|
||||
await ipfs1.stop()
|
||||
await ipfs2.stop()
|
||||
|
||||
@ -136,8 +138,12 @@ describe('Replicating databases', function () {
|
||||
|
||||
await orbitdb1.stop()
|
||||
await orbitdb2.stop()
|
||||
await ipfs1.blockstore.child.child.close()
|
||||
await ipfs2.blockstore.child.child.close()
|
||||
// TODO: Strange issue with ClassicLevel. Causes subsequent Helia
|
||||
// instantiations to error with db closed. Explicitly closing the
|
||||
// nested ClassicLevel db seems to resolve the issue. Requires further
|
||||
// investigation.
|
||||
await ipfs1.blockstore.child.child.child.close()
|
||||
await ipfs2.blockstore.child.child.child.close()
|
||||
await ipfs1.stop()
|
||||
await ipfs2.stop()
|
||||
|
||||
@ -163,5 +169,26 @@ describe('Replicating databases', function () {
|
||||
|
||||
console.log('events:', amount)
|
||||
})
|
||||
|
||||
it('pins all entries in the replicated database', async () => {
|
||||
const db1 = await orbitdb1.open('helloworld', { referencesCount: 0 })
|
||||
const hash = await db1.add('hello world')
|
||||
|
||||
let replicated = false
|
||||
|
||||
const onJoin = async (peerId, heads) => {
|
||||
replicated = true
|
||||
}
|
||||
|
||||
const db2 = await orbitdb2.open(db1.address)
|
||||
|
||||
db2.events.on('join', onJoin)
|
||||
|
||||
await waitFor(() => replicated, () => true)
|
||||
|
||||
const cid = CID.parse(hash, base58btc)
|
||||
strictEqual(await ipfs1.pins.isPinned(cid), true)
|
||||
strictEqual(await ipfs2.pins.isPinned(cid), true)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
@ -72,8 +72,8 @@ describe('OrbitDB', function () {
|
||||
const privateKey = await orbitdb1.keystore.getKey(orbitdb1.identity.id)
|
||||
notStrictEqual(privateKey, undefined)
|
||||
strictEqual(privateKey.constructor.name, 'Secp256k1PrivateKey')
|
||||
notStrictEqual(privateKey._key, undefined)
|
||||
notStrictEqual(privateKey._publicKey, undefined)
|
||||
notStrictEqual(privateKey.raw, undefined)
|
||||
notStrictEqual(privateKey.publicKey, undefined)
|
||||
})
|
||||
|
||||
it('has a keystore that contains a public key that matches the identity\'s public key', async () => {
|
||||
@ -102,8 +102,8 @@ describe('OrbitDB', function () {
|
||||
notStrictEqual(orbitdb1.peerId, undefined)
|
||||
})
|
||||
|
||||
it('has a peerId of type Ed25519PeerIdImpl', async () => {
|
||||
strictEqual(orbitdb1.peerId.constructor.name, 'Ed25519PeerIdImpl')
|
||||
it('has a peerId of type Ed25519', async () => {
|
||||
strictEqual(orbitdb1.peerId.type, 'Ed25519')
|
||||
})
|
||||
|
||||
it('has a peerId that matches the IPFS id', async () => {
|
||||
@ -164,8 +164,8 @@ describe('OrbitDB', function () {
|
||||
const privateKey = await orbitdb1.keystore.getKey(orbitdb1.identity.id)
|
||||
notStrictEqual(privateKey, undefined)
|
||||
strictEqual(privateKey.constructor.name, 'Secp256k1PrivateKey')
|
||||
notStrictEqual(privateKey._key, undefined)
|
||||
notStrictEqual(privateKey._publicKey, undefined)
|
||||
notStrictEqual(privateKey.raw, undefined)
|
||||
notStrictEqual(privateKey.publicKey, undefined)
|
||||
})
|
||||
|
||||
it('has a keystore that contains a public key that matches the identity\'s public key', async () => {
|
||||
@ -194,8 +194,8 @@ describe('OrbitDB', function () {
|
||||
notStrictEqual(orbitdb1.peerId, undefined)
|
||||
})
|
||||
|
||||
it('has a peerId of type Ed25519PeerIdImpl', async () => {
|
||||
strictEqual(orbitdb1.peerId.constructor.name, 'Ed25519PeerIdImpl')
|
||||
it('has a peerId of type Ed25519', async () => {
|
||||
strictEqual(orbitdb1.peerId.type, 'Ed25519')
|
||||
})
|
||||
|
||||
it('has a peerId that matches the IPFS id', async () => {
|
||||
|
@ -2,7 +2,7 @@ import { strictEqual, notStrictEqual } from 'assert'
|
||||
import { rimraf } from 'rimraf'
|
||||
import { copy } from 'fs-extra'
|
||||
import { Log, Identities, KeyStore } from '../src/index.js'
|
||||
import { IPFSBlockStorage, MemoryStorage, LRUStorage, ComposedStorage } from '../src/storage/index.js'
|
||||
import { IPFSBlockStorage, MemoryStorage, LRUStorage, ComposedStorage, LevelStorage } from '../src/storage/index.js'
|
||||
import testKeysPath from './fixtures/test-keys-path.js'
|
||||
import createHelia from './utils/create-helia.js'
|
||||
|
||||
@ -33,6 +33,7 @@ describe('Storages', function () {
|
||||
await keystore.close()
|
||||
}
|
||||
|
||||
await rimraf('./level')
|
||||
await rimraf('./ipfs1')
|
||||
await rimraf(keysPath)
|
||||
})
|
||||
@ -41,26 +42,56 @@ describe('Storages', function () {
|
||||
const amount = 100
|
||||
const log1 = await Log(testIdentity, { logId: 'A', entryStorage: storage })
|
||||
const log2 = await Log(testIdentity, { logId: 'A', entryStorage: storage })
|
||||
|
||||
for (let i = 0; i < amount; i++) {
|
||||
await log1.append('hello' + i)
|
||||
await log2.append('hello' + i)
|
||||
}
|
||||
|
||||
const values = await log1.values()
|
||||
const heads = await log1.heads()
|
||||
|
||||
strictEqual(heads.length, 1)
|
||||
strictEqual(values.length, amount)
|
||||
|
||||
await log1.storage.clear()
|
||||
await log2.storage.clear()
|
||||
|
||||
const result = []
|
||||
for await (const v of log1.storage.iterator()) {
|
||||
result.push(v)
|
||||
}
|
||||
for await (const v of log2.storage.iterator()) {
|
||||
result.push(v)
|
||||
}
|
||||
|
||||
strictEqual(result.length, 0)
|
||||
|
||||
await log1.storage.close()
|
||||
await log2.storage.close()
|
||||
}
|
||||
|
||||
const testInterface = (storage) => {
|
||||
notStrictEqual(storage.put, undefined)
|
||||
notStrictEqual(storage.get, undefined)
|
||||
notStrictEqual(storage.del, undefined)
|
||||
notStrictEqual(storage.iterator, undefined)
|
||||
notStrictEqual(storage.merge, undefined)
|
||||
notStrictEqual(storage.clear, undefined)
|
||||
notStrictEqual(storage.close, undefined)
|
||||
}
|
||||
|
||||
describe('LRUStorage', () => {
|
||||
it('tests the storage', async () => {
|
||||
const storage = await LRUStorage()
|
||||
notStrictEqual(storage, undefined)
|
||||
await runTestWithStorage(storage)
|
||||
})
|
||||
|
||||
it('has correct interface', async () => {
|
||||
const storage = await LRUStorage()
|
||||
testInterface(storage)
|
||||
})
|
||||
})
|
||||
|
||||
describe('MemoryStorage', () => {
|
||||
@ -69,6 +100,11 @@ describe('Storages', function () {
|
||||
notStrictEqual(storage, undefined)
|
||||
await runTestWithStorage(storage)
|
||||
})
|
||||
|
||||
it('has correct interface', async () => {
|
||||
const storage = await MemoryStorage()
|
||||
testInterface(storage)
|
||||
})
|
||||
})
|
||||
|
||||
describe('IPFSBlockStorage', () => {
|
||||
@ -77,9 +113,27 @@ describe('Storages', function () {
|
||||
notStrictEqual(storage, undefined)
|
||||
await runTestWithStorage(storage)
|
||||
})
|
||||
|
||||
it('has correct interface', async () => {
|
||||
const storage = await IPFSBlockStorage({ ipfs })
|
||||
testInterface(storage)
|
||||
})
|
||||
})
|
||||
|
||||
describe('Composed Storages', () => {
|
||||
describe('LevelStorage', () => {
|
||||
it('tests the storage', async () => {
|
||||
const storage = await LevelStorage()
|
||||
notStrictEqual(storage, undefined)
|
||||
await runTestWithStorage(storage)
|
||||
})
|
||||
|
||||
it('has correct interface', async () => {
|
||||
const storage = await LevelStorage()
|
||||
testInterface(storage)
|
||||
})
|
||||
})
|
||||
|
||||
describe('Composed Storage', () => {
|
||||
it('tests Memory + IPFSBlockStorage composition', async () => {
|
||||
const storage1 = await MemoryStorage()
|
||||
const storage2 = await IPFSBlockStorage({ ipfs })
|
||||
@ -88,6 +142,14 @@ describe('Storages', function () {
|
||||
await runTestWithStorage(storage)
|
||||
})
|
||||
|
||||
it('tests Memory + LevelStorage composition', async () => {
|
||||
const storage1 = await MemoryStorage()
|
||||
const storage2 = await LevelStorage()
|
||||
const storage = await ComposedStorage(storage1, storage2)
|
||||
notStrictEqual(storage, undefined)
|
||||
await runTestWithStorage(storage)
|
||||
})
|
||||
|
||||
it('tests LRU + IPFSBlockStorage composition', async () => {
|
||||
const storage1 = await LRUStorage({ size: -1 })
|
||||
const storage2 = await IPFSBlockStorage({ ipfs })
|
||||
@ -111,5 +173,12 @@ describe('Storages', function () {
|
||||
notStrictEqual(storage, undefined)
|
||||
await runTestWithStorage(storage)
|
||||
})
|
||||
|
||||
it('has correct interface', async () => {
|
||||
const storage1 = await LRUStorage()
|
||||
const storage2 = await MemoryStorage()
|
||||
const storage = await ComposedStorage(storage1, storage2)
|
||||
testInterface(storage)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
@ -25,14 +25,28 @@ describe('IPFSBlockStorage', function () {
|
||||
})
|
||||
|
||||
it('gets a block', async () => {
|
||||
const expected = 'hello world'
|
||||
const { cid, bytes } = await Block.encode({ value: expected, codec, hasher })
|
||||
|
||||
const hash = cid.toString(base58btc)
|
||||
|
||||
await storage.put(hash, bytes)
|
||||
|
||||
const data = await storage.get(hash)
|
||||
const block = await Block.decode({ bytes: data, codec, hasher })
|
||||
const actual = block.value
|
||||
|
||||
strictEqual(actual, expected)
|
||||
})
|
||||
|
||||
it('checks that a block is pinned', async () => {
|
||||
const expected = 'hello world'
|
||||
const block = await Block.encode({ value: expected, codec, hasher })
|
||||
const cid = block.cid.toString(base58btc)
|
||||
|
||||
await storage.put(cid, 'hello world')
|
||||
const actual = await storage.get(cid)
|
||||
await storage.put(cid, block.bytes)
|
||||
|
||||
strictEqual(actual, expected)
|
||||
strictEqual(await ipfs.pins.isPinned(block.cid), true)
|
||||
})
|
||||
|
||||
it('throws an error if a block does not exist', async () => {
|
||||
|
@ -139,8 +139,7 @@ describe('Sync protocol', function () {
|
||||
log1 = await Log(testIdentity1, { logId: 'synclog111', entryStorage: entryStorage1 })
|
||||
log2 = await Log(testIdentity2, { logId: 'synclog111', entryStorage: entryStorage2 })
|
||||
|
||||
const onSynced = async (bytes) => {
|
||||
const entry = await Entry.decode(bytes)
|
||||
const onSynced = async (entry) => {
|
||||
if (await log2.joinEntry(entry)) {
|
||||
syncedHead = entry
|
||||
syncedEventFired = true
|
||||
@ -207,8 +206,7 @@ describe('Sync protocol', function () {
|
||||
log1 = await Log(testIdentity1, { logId: 'synclog7', entryStorage: entryStorage1 })
|
||||
log2 = await Log(testIdentity2, { logId: 'synclog7', entryStorage: entryStorage2 })
|
||||
|
||||
const onSynced = async (bytes) => {
|
||||
const entry = await Entry.decode(bytes)
|
||||
const onSynced = async (entry) => {
|
||||
if (await log2.joinEntry(entry)) {
|
||||
syncedHead = entry
|
||||
}
|
||||
@ -291,8 +289,8 @@ describe('Sync protocol', function () {
|
||||
log1 = await Log(testIdentity1, { logId: 'synclog1' })
|
||||
log2 = await Log(testIdentity2, { logId: 'synclog1' })
|
||||
|
||||
const onSynced = async (bytes) => {
|
||||
syncedHead = await Entry.decode(bytes)
|
||||
const onSynced = async (entry) => {
|
||||
syncedHead = entry
|
||||
syncedEventFired = expectedEntry.hash === syncedHead.hash
|
||||
}
|
||||
|
||||
@ -348,8 +346,8 @@ describe('Sync protocol', function () {
|
||||
log1 = await Log(testIdentity1, { logId: 'synclog1' })
|
||||
log2 = await Log(testIdentity2, { logId: 'synclog1' })
|
||||
|
||||
const onSynced = async (bytes) => {
|
||||
syncedHead = await Entry.decode(bytes)
|
||||
const onSynced = async (entry) => {
|
||||
syncedHead = entry
|
||||
if (expectedEntry) {
|
||||
syncedEventFired = expectedEntry.hash === syncedHead.hash
|
||||
}
|
||||
@ -434,9 +432,9 @@ describe('Sync protocol', function () {
|
||||
log1 = await Log(testIdentity1, { logId: 'synclog1' })
|
||||
log2 = await Log(testIdentity2, { logId: 'synclog1' })
|
||||
|
||||
const onSynced = async (bytes) => {
|
||||
const onSynced = async (entry) => {
|
||||
if (expectedEntry && !syncedEventFired) {
|
||||
syncedHead = await Entry.decode(bytes)
|
||||
syncedHead = entry
|
||||
syncedEventFired = expectedEntry.hash === syncedHead.hash
|
||||
}
|
||||
}
|
||||
@ -518,8 +516,8 @@ describe('Sync protocol', function () {
|
||||
log1 = await Log(testIdentity1, { logId: 'synclog2' })
|
||||
log2 = await Log(testIdentity2, { logId: 'synclog2' })
|
||||
|
||||
const onSynced = async (bytes) => {
|
||||
syncedHead = await Entry.decode(bytes)
|
||||
const onSynced = async (entry) => {
|
||||
syncedHead = entry
|
||||
if (expectedEntry) {
|
||||
syncedEventFired = expectedEntry ? expectedEntry.hash === syncedHead.hash : false
|
||||
}
|
||||
@ -665,7 +663,7 @@ describe('Sync protocol', function () {
|
||||
let sync1, sync2
|
||||
let log1, log2
|
||||
|
||||
const timeoutTime = 1 // 1 millisecond
|
||||
const timeoutTime = 0 // 0 milliseconds
|
||||
|
||||
before(async () => {
|
||||
[ipfs1, ipfs2] = await Promise.all([createHelia(), createHelia()])
|
||||
@ -701,13 +699,13 @@ describe('Sync protocol', function () {
|
||||
let err = null
|
||||
|
||||
const onError = (error) => {
|
||||
(!err) && (err = error)
|
||||
err ??= error
|
||||
}
|
||||
|
||||
sync1 = await Sync({ ipfs: ipfs1, log: log1, timeout: timeoutTime })
|
||||
sync2 = await Sync({ ipfs: ipfs2, log: log2, start: false, timeout: timeoutTime })
|
||||
|
||||
sync1.events.on('error', onError)
|
||||
|
||||
sync2 = await Sync({ ipfs: ipfs2, log: log2, start: false, timeout: timeoutTime })
|
||||
sync2.events.on('error', onError)
|
||||
|
||||
await log1.append('hello1')
|
||||
@ -718,7 +716,7 @@ describe('Sync protocol', function () {
|
||||
|
||||
notStrictEqual(err, null)
|
||||
strictEqual(err.type, 'aborted')
|
||||
strictEqual(err.message, 'Read aborted')
|
||||
strictEqual(err.message.includes('aborted'), true)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
@ -21,20 +21,16 @@ const Libp2pOptions = {
|
||||
transports: [
|
||||
webSockets({
|
||||
filter: all
|
||||
}),
|
||||
webRTC(),
|
||||
circuitRelayTransport({
|
||||
discoverRelays: 1
|
||||
})
|
||||
],
|
||||
connectionEncryption: [noise()],
|
||||
connectionEncrypters: [noise()],
|
||||
streamMuxers: [yamux()],
|
||||
connectionGater: {
|
||||
denyDialMultiaddr: () => false
|
||||
},
|
||||
services: {
|
||||
identify: identify(),
|
||||
pubsub: gossipsub({ allowPublishToZeroPeers: true })
|
||||
pubsub: gossipsub({ allowPublishToZeroTopicPeers: true })
|
||||
}
|
||||
}
|
||||
|
||||
@ -43,25 +39,23 @@ const Libp2pOptions = {
|
||||
*/
|
||||
const Libp2pBrowserOptions = {
|
||||
addresses: {
|
||||
listen: ['/webrtc']
|
||||
listen: ['/webrtc', '/p2p-circuit']
|
||||
},
|
||||
transports: [
|
||||
webSockets({
|
||||
filter: all
|
||||
}),
|
||||
webRTC(),
|
||||
circuitRelayTransport({
|
||||
discoverRelays: 1
|
||||
})
|
||||
circuitRelayTransport()
|
||||
],
|
||||
connectionEncryption: [noise()],
|
||||
connectionEncrypters: [noise()],
|
||||
streamMuxers: [yamux()],
|
||||
connectionGater: {
|
||||
denyDialMultiaddr: () => false
|
||||
},
|
||||
services: {
|
||||
identify: identify(),
|
||||
pubsub: gossipsub({ allowPublishToZeroPeers: true })
|
||||
pubsub: gossipsub({ allowPublishToZeroTopicPeers: true })
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -5,21 +5,18 @@ import { circuitRelayServer } from '@libp2p/circuit-relay-v2'
|
||||
import { webSockets } from '@libp2p/websockets'
|
||||
import * as filters from '@libp2p/websockets/filters'
|
||||
import { identify } from '@libp2p/identify'
|
||||
import { createFromPrivKey } from '@libp2p/peer-id-factory'
|
||||
import { unmarshalPrivateKey } from '@libp2p/crypto/keys'
|
||||
import { fromString as uint8ArrayFromString } from 'uint8arrays/from-string'
|
||||
import { privateKeyFromProtobuf } from '@libp2p/crypto/keys'
|
||||
|
||||
// output of: console.log(server.peerId.privateKey.toString('hex'))
|
||||
const relayPrivKey = '08011240821cb6bc3d4547fcccb513e82e4d718089f8a166b23ffcd4a436754b6b0774cf07447d1693cd10ce11ef950d7517bad6e9472b41a927cd17fc3fb23f8c70cd99'
|
||||
// the peer id of the above key
|
||||
// const relayId = '12D3KooWAJjbRkp8FPF5MKgMU53aUTxWkqvDrs4zc1VMbwRwfsbE'
|
||||
|
||||
const encoded = uint8ArrayFromString(relayPrivKey, 'hex')
|
||||
const privateKey = await unmarshalPrivateKey(encoded)
|
||||
const peerId = await createFromPrivKey(privateKey)
|
||||
const privateKey = privateKeyFromProtobuf(uint8ArrayFromString(relayPrivKey, 'hex'))
|
||||
|
||||
const server = await createLibp2p({
|
||||
peerId,
|
||||
privateKey,
|
||||
addresses: {
|
||||
listen: ['/ip4/0.0.0.0/tcp/12345/ws']
|
||||
},
|
||||
@ -28,14 +25,13 @@ const server = await createLibp2p({
|
||||
filter: filters.all
|
||||
})
|
||||
],
|
||||
connectionEncryption: [noise()],
|
||||
connectionEncrypters: [noise()],
|
||||
streamMuxers: [yamux()],
|
||||
services: {
|
||||
identify: identify(),
|
||||
relay: circuitRelayServer({
|
||||
reservations: {
|
||||
maxReservations: 5000,
|
||||
reservationTtl: 1000,
|
||||
defaultDataLimit: BigInt(1024 * 1024 * 1024)
|
||||
}
|
||||
})
|
||||
|