Compare commits


71 Commits
v2.4.3 ... main

Author SHA1 Message Date
Hayden Young
d9e62979d8 chore: Update changelog. 2025-05-29 13:48:41 +02:00
Hayden Young
ae7f464aea chore: Update simple encryption version. Iterate version. 2025-05-26 19:19:49 +02:00
Hayden Young
1520eae162 chore: Iterate version. 2025-05-26 18:52:12 +02:00
Hayden Young
0cabfdbae7
Merge pull request #1229 from orbitdb/feat/encryption-2
Feat/encryption 2
2025-05-26 18:21:06 +02:00
Hayden Young
b116103628 chore: Use simple-encryption v0.0.1 for encryption tests. 2025-05-26 12:09:01 +02:00
Hayden Young
d0004044f2 chore: Iteration version. 2025-05-26 10:36:51 +02:00
Hayden Young
231976998f test: Enc/dec using simple encryption module. 2025-05-26 10:35:47 +02:00
haad
b539877a39 Fix race conditions upon dropping database 2025-05-20 13:17:17 +03:00
haad
6c89d68a25 Remove browsers tests from Nodejs minimum version tests on CI 2025-05-20 10:02:17 +03:00
haad
85686ba478 Fix timeout and timing bug in Sync 2025-05-20 10:00:37 +03:00
haad
90f84bf12f Encode heads to bytes correctly 2025-05-20 09:50:37 +03:00
Hayden Young
6f72fdddfe fix: Put entry and index before setting heads to avoid retrieving a head not in the database. 2025-05-17 01:22:58 +02:00
haad
dc150b9cc2 Fix a timeout/race condition when closing databases 2025-05-09 12:37:40 +02:00
haad
0295262092 Refactor heads 2025-05-09 12:37:13 +02:00
haad
1d08f996c5 Cancel ipfs block fetches when storage is closed 2025-05-09 12:36:44 +02:00
haad
5dc8ea25fd Rename index to OplogStore 2025-05-09 12:36:24 +02:00
Hayden Young
46160978f8 tests: Remove mock encryption benchmarks. 2025-05-06 14:48:24 +02:00
haad
dccbd1c6a2 doc: More documentation fixes 2025-05-04 10:06:02 +02:00
haad
05e4250b6b docs: Add details to encryption documentation 2025-05-04 09:05:16 +02:00
Hayden Young
a19cbc1edd docs: Encryption benchmarking. 2025-04-22 23:09:26 +02:00
Hayden Young
1f07d25706 test: Benchmark all db types using an encryption mock. 2025-04-22 21:24:13 +02:00
Hayden Young
45f6b972ee docs: Encrypting databases update. 2025-04-22 21:10:46 +02:00
Hayden Young
4512b16b0b docs: A brief explanation of encryption in OrbitDB. 2025-04-22 15:29:03 +02:00
Hayden Young
56997f1724 chore: Fix package lock. 2025-04-18 12:08:40 +02:00
Hayden Young
81c7c2b8b8 Merge branch 'feat/encryption' of github.com:orbitdb/orbitdb into feat/encryption 2025-04-18 11:59:10 +02:00
Hayden Young
116d2fb91e Merge remote-tracking branch 'origin' into feat/encryption 2025-04-18 11:51:29 +02:00
Hayden Young
ea1c16a491 test: Benchmark for OpLog encryption. 2025-04-18 11:49:00 +02:00
Hayden Young
c3d8b37915
Merge pull request #1225 from orbitdb/feat/encryption-pin-oplog-entry
refactor: Pin verified entry.
2025-04-12 08:41:23 +01:00
Hayden Young
58fa496f2f chore: Remove tea registration. 2025-04-02 12:41:51 +02:00
Hayden Young
57204b1bae docs: Encryption. 2025-03-09 19:33:04 +01:00
Hayden Young
2aab9667c4 chore: Bump Helia version. 2025-03-07 17:32:54 +01:00
Hayden Young
a66c4e01bb chore: Name min ver test. 2025-03-07 17:27:08 +01:00
Hayden Young
da1a0873fd chore: Node min ver test. 2025-03-07 17:23:32 +01:00
Hayden Young
03f0b1141e refactor: Pin verified entry. 2025-03-07 12:25:02 +01:00
Hayden Young
6f3b605174 refactor: Separate password encryption into its own module. 2025-03-03 18:20:27 +01:00
Hayden Young
b04d431465 chore: Upgrade helia. 2025-02-28 19:23:55 +01:00
Hayden Young
061592b315 chore: Upgrade playwright-test. 2025-02-28 19:10:19 +01:00
Hayden Young
e144bf760c fix: Pin when adding verified entries. 2025-02-28 18:18:06 +01:00
Hayden Young
7110da6e2d fix: Add the encrypted entry to entries to ensure the entry is pinned. 2025-02-22 16:09:09 +01:00
Hayden Young
5f54993660 Merge remote-tracking branch 'origin' into feat/encryption 2025-02-22 15:27:07 +01:00
Hayden Young
b5926cb972 fix: Deploy pages. 2025-02-22 14:52:08 +01:00
Hayden Young
11c11ffe61 chore: Fix upload pages for api docs. 2025-02-22 14:42:51 +01:00
Hayden Young
494dfdb1ef chore: Fix upload pages for api docs. 2025-02-22 14:36:40 +01:00
Hayden Young
1b470fdb68 chore: Iterate version. 2025-02-22 14:25:51 +01:00
Haad
35bf38d8d8
Merge pull request #1223 from julienmalard/patch-3
Remove disconnected peers
2025-02-21 15:02:25 +02:00
Julien Malard-Adam
c5898dd58a
Remove semicolons 2025-02-21 12:19:51 +01:00
Julien Malard-Adam
8ff138de46
Remove disconnected peers 2025-02-21 09:40:56 +01:00
Hayden Young
3d41ab5d99
Merge pull request #1216 from julienmalard/patch-1
Use `.name` instead of `.code` for errors
2025-02-12 19:29:31 +08:00
Julien Malard-Adam
d504e0f7ba
Update sync.js
Remove console.error, since the error is already caught and reported by events.emit('error') afterwards
2025-02-12 11:15:08 +01:00
Julien Malard-Adam
28b3e807de
Update error name 2025-02-04 12:12:09 -05:00
Julien Malard-Adam
41f015cb13
Use .name instead of .code for errors
Fix for the change in libp2p here: https://github.com/libp2p/js-libp2p/pull/2655
2025-02-04 12:09:11 -05:00
Hayden Young
d290032ebf
Merge pull request #1213 from Alulx/fix/docs-Getting-Started
Updated GETTING_STARTED.md with new instructions
2025-01-28 00:24:48 +08:00
alex
9612a61c6f Updated GETTING_STARTED.md with new instructions 2025-01-25 12:27:57 +01:00
Hayden Young
4fccfda975
Merge pull request #1206 from PradaJoaquin/main
fix: Update relay example to use default reservation ttl setting
2024-11-26 06:28:31 +08:00
Joaquin Prada
adb8d77aa2 fix: Revert default relay settings, keep updated ttl 2024-11-25 19:12:35 -03:00
Joaquin Prada
ed12f2b2cf fix: Update relay example to use default reservation settings 2024-11-25 18:32:14 -03:00
Hayden Young
c173a01389
Merge pull request #1187 from orbitdb/feat/encryption-entries
Feat/encryption entries
2024-07-14 19:56:34 +08:00
Hayden Young
71c2505f20 fix: Load browser crypto lib if available. 2024-07-08 12:52:24 +02:00
haad
d83bfa9fc8 Refactor and add separate PasswordEncryption module 2024-07-07 10:44:22 +03:00
haad
383420750e Move oplog storages to their own module 2024-06-30 09:25:00 +03:00
haad
f30789fece Encrypt and decrypt entries and payloads 2024-06-28 22:13:46 +03:00
Hayden Young
7e5672eff0 fix: Linting. 2024-05-23 15:33:33 +01:00
Hayden Young
f272d3ee3c feat: Encrypt oplog payload. 2024-05-23 15:24:49 +01:00
Hayden Young
90f66cfe5d feat: Pass encrypt configuration as a hierarchical list of params. 2024-05-22 17:05:51 +01:00
Hayden Young
ac3011c873 feat: Encrypt either data, payload or both. 2024-05-21 23:28:29 +01:00
Hayden Young
cce6a524e2 fix: Browser tests. 2024-05-17 15:44:37 +01:00
Hayden Young
888d3e8af3 fix: Linting. 2024-05-17 03:35:38 +01:00
Hayden Young
373d2d8414 feat: Explicitly specify public and private keys. 2024-05-17 03:30:56 +01:00
Hayden Young
796308ca7f Merge remote-tracking branch 'origin/main' into feat/encryption 2024-05-17 03:29:50 +01:00
Hayden Young
aab912581e Merge remote-tracking branch 'origin' into feat/encryption 2024-05-01 15:45:50 +01:00
Hayden Young
ed2c5e7d8c feat: Basic asymmetric encryption using an OrbitDB identity. 2024-05-01 15:44:57 +01:00
35 changed files with 3021 additions and 4569 deletions


@ -25,7 +25,7 @@ jobs:
registry-url: https://registry.npmjs.org/
- run: npm ci
- run: npm run build:docs
- uses: actions/upload-pages-artifact@v2
- uses: actions/upload-pages-artifact@v3
with:
path: ./docs/api/
@ -37,5 +37,5 @@ jobs:
steps:
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@v2 # or the latest "vX.X.X" version tag for this action
uses: actions/deploy-pages@v4 # or the latest "vX.X.X" version tag for this action


@ -0,0 +1,20 @@
---
name: Run Tests (Node Minimum Version)
on: [push, pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-node@v3
with:
node-version: 20
registry-url: https://registry.npmjs.org/
- name: Install dependencies
run: npm ci
- name: Run linter
run: npm run lint
- name: Run tests
run: npm run test:ci


@ -2,10 +2,10 @@
For now, please refer to our Git commit history for a list of changes.
https://github.com/orbitdb/orbitdb/compare/v2.4.2...v2.4.3
https://github.com/orbitdb/orbitdb/compare/v2.5.0...v3.0.2
You can also use the following git command to generate a log of changes:
```
git log v2.4.2..v2.4.3 --oneline
git log v2.5.0..v3.0.2 --oneline
```


@ -109,6 +109,7 @@ You can find more advanced topics in our [docs](https://github.com/orbitdb/orbit
- [Connecting Peers](https://github.com/orbitdb/orbitdb/blob/main/docs/CONNECTING_PEERS.md)
- [Replication](https://github.com/orbitdb/orbitdb/blob/main/docs/REPLICATION.md)
- [Oplog](https://github.com/orbitdb/orbitdb/blob/main/docs/OPLOG.md)
- [Encryption](https://github.com/orbitdb/orbitdb/blob/main/docs/ENCRYPTION.md)
### API


@ -45,8 +45,10 @@ EventEmitter.defaultMaxListeners = 10000
let connected = false
const onJoin = async (peerId) => (connected = true)
const onError = async (err) => console.error(err)
db2.events.on('join', onJoin)
db2.events.on('error', onError)
await waitFor(() => connected, () => true)


@ -38,6 +38,8 @@ export default (env, argv) => {
],
fallback: {
path: require.resolve('path-browserify'),
crypto: false,
stream: require.resolve('stream-browserify'),
process: false
}
},

docs/ENCRYPTION.md (new file, 94 lines)

@ -0,0 +1,94 @@
# Encryption
OrbitDB features a modular architecture for database encryption. By passing a module to an OrbitDB database, different encryption methods can be employed.
The OrbitDB project currently maintains a [SimpleEncryption](https://github.com/orbitdb/simple-encryption) module that can be used to get started with encrypted databases.
**WARNING:** SimpleEncryption is an unaudited encryption module. Use at your own risk.
## How it works
OrbitDB encrypts databases in two layers: encrypting the payload and encrypting the log entry.
Log entry encryption encrypts each log entry fully. Payload encryption encrypts just the value.
This makes it possible to separate the users of a database from its replicators, i.e. an OrbitDB peer can replicate a database without being able to decrypt the payloads (the data) of the database.
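For example, a peer that is given only the `replication` module can replicate a database without being able to read its contents. The sketch below assumes two OrbitDB instances, `orbitdbA` and `orbitdbB`, and uses the SimpleEncryption module described later in this document:
```js
import { SimpleEncryption } from '@orbitdb/simple-encryption'

const replication = await SimpleEncryption({ password: 'replication-password' })
const data = await SimpleEncryption({ password: 'data-password' })

// Peer A holds both modules: it can replicate the log and read the data.
const dbA = await orbitdbA.open('my-db', { encryption: { replication, data } })

// Peer B holds only the replication module: it can replicate the log,
// but the payloads remain opaque to it.
const dbB = await orbitdbB.open(dbA.address, { encryption: { replication } })
```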
## Configuring encryption
You can configure OrbitDB to encrypt either the payload data being stored or the entire database.
To ***encrypt payload data only***, specify an encryption module and pass it to OrbitDB via the encryption object using the `data` property:
```js
const data = await EncryptionModule()
const encryption = { data }
const db = await orbitdb.open(dbNameOrAddress, { encryption })
```
To ***encrypt the database log entries***, specify an encryption module and pass it to OrbitDB via the encryption object using the `replication` property:
```js
const replication = await EncryptionModule()
const encryption = { replication }
const db = await orbitdb.open(dbNameOrAddress, { encryption })
```
To ***encrypt the whole database***, with payload data and oplog entries encrypted separately, specify encryption modules and pass them to OrbitDB via the encryption object using both the `replication` and `data` properties:
```js
const replication = await EncryptionModule()
const data = await EncryptionModule()
const encryption = { replication, data }
const db = await orbitdb.open(dbNameOrAddress, { encryption })
```
## Encrypting Databases
OrbitDB provides simple password-based encryption via an external module called [SimpleEncryption](https://github.com/orbitdb/simple-encryption).
**WARNING:** This is an unaudited encryption module. Use at your own risk.
To install SimpleEncryption:
```sh
npm i @orbitdb/simple-encryption
```
To use encryption, instantiate SimpleEncryption with a password and pass it to OrbitDB when opening your database:
```js
import { SimpleEncryption } from '@orbitdb/simple-encryption'
const replication = await SimpleEncryption({ password: 'hello' })
const data = await SimpleEncryption({ password: 'world' })
const encryption = { data, replication }
const db = await orbitdb.open(dbNameOrAddress, { encryption })
```
If you wish to use another encryption type, simply replace SimpleEncryption with the module of your choice.
## Custom Encryption
To implement a custom encryption module for OrbitDB, expose `encrypt` and `decrypt` functions:
```js
const CustomEncryption = async () => {
  const encrypt = (value) => {
    // return encrypted value
  }

  const decrypt = (value) => {
    // return decrypted value
  }

  return {
    encrypt,
    decrypt
  }
}

export default CustomEncryption
```
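As a concrete illustration, here is a minimal sketch of a custom module built on Node's built-in `crypto` AES-256-GCM primitives. The cipher choice, the fixed salt, and the `AesGcmEncryption` name are assumptions made for this example; like SimpleEncryption, this sketch is unaudited and not a production-ready design:
```js
import { randomBytes, createCipheriv, createDecipheriv, scryptSync } from 'node:crypto'

const AesGcmEncryption = async ({ password }) => {
  // Derive a 32-byte key from the password (fixed salt for illustration only).
  const key = scryptSync(password, 'example-salt', 32)

  const encrypt = async (value) => {
    const iv = randomBytes(12)
    const cipher = createCipheriv('aes-256-gcm', key, iv)
    const ciphertext = Buffer.concat([cipher.update(value), cipher.final()])
    // Prepend the IV and auth tag so decrypt can recover them.
    return Buffer.concat([iv, cipher.getAuthTag(), ciphertext])
  }

  const decrypt = async (value) => {
    const buf = Buffer.from(value)
    const iv = buf.subarray(0, 12)
    const tag = buf.subarray(12, 28)
    const decipher = createDecipheriv('aes-256-gcm', key, iv)
    decipher.setAuthTag(tag)
    return Buffer.concat([decipher.update(buf.subarray(28)), decipher.final()])
  }

  return { encrypt, decrypt }
}

export default AesGcmEncryption
```
It can then be passed to a database in the same way as SimpleEncryption, e.g. `{ encryption: { data: await AesGcmEncryption({ password: 'world' }) } }`.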


@ -206,6 +206,7 @@ import { createHelia } from 'helia'
import { createOrbitDB, IPFSAccessController } from '@orbitdb/core'
import { LevelBlockstore } from 'blockstore-level'
import { Libp2pOptions } from './config/libp2p.js'
import { multiaddr } from '@multiformats/multiaddr'
const main = async () => {
// create a random directory to avoid OrbitDB conflicts.
@ -219,7 +220,9 @@ const main = async () => {
let db
if (process.argv[2]) {
if (process.argv[2] && process.argv[3]) {
await orbitdb.ipfs.libp2p.dial(multiaddr(process.argv[3]))
console.log('opening db', process.argv[2])
db = await orbitdb.open(process.argv[2])
} else {
// When we open a new database, write access is only available to the
@ -230,6 +233,8 @@ const main = async () => {
// access using grant and revoke.
db = await orbitdb.open('my-db', { AccessController: IPFSAccessController({ write: ['*']}) })
console.log('libp2p address', '(copy one of these addresses then dial into this node from the second node)', orbitdb.ipfs.libp2p.getMultiaddrs())
// Copy this output if you want to connect a peer to another.
console.log('my-db address', '(copy my db address and use when launching peer 2)', db.address)
}
@ -263,27 +268,33 @@ const main = async () => {
main()
```
Open two consoles in your command line terminal.
Launch peer 1 from the terminal:
In terminal 1, run the first peer:
```sh
node index.js
```bash
node test.js
```
When running, you should see the address of the database, for example:
Once launched you will see some output which may look something like this:
```sh
```
libp2p address (copy one of these addresses then dial into this node from the second node) [
Multiaddr(/ip4/127.0.0.1/tcp/36161/p2p/12D3KooWKFWB78Hka2uPVNYYoXfucWp6rDLsQzr5CFiP67NAo7YF),
Multiaddr(/ip4/192.168.1.22/tcp/36161/p2p/12D3KooWKFWB78Hka2uPVNYYoXfucWp6rDLsQzr5CFiP67NAo7YF),
Multiaddr(/ip4/100.64.100.6/tcp/36161/p2p/12D3KooWKFWB78Hka2uPVNYYoXfucWp6rDLsQzr5CFiP67NAo7YF)
]
my-db address (copy my db address and use when launching peer 2) /orbitdb/zdpuB2aYUCnZ7YUBrDkCWpRLQ8ieUbqJEVRZEd5aDhJBDpBqj
```
Copy the database's address from terminal 1 and, in terminal 2, run:
It contains the libp2p address and db address. You will need both of these when connecting from peer 2.
```sh
node index.js /orbitdb/zdpuB2aYUCnZ7YUBrDkCWpRLQ8ieUbqJEVRZEd5aDhJBDpBqj
Open another terminal and launch peer 2. The command takes the form `node test.js <orbitdb-address> <libp2p-address>`
```bash
node test.js /orbitdb/zdpuB2aYUCnZ7YUBrDkCWpRLQ8ieUbqJEVRZEd5aDhJBDpBqj /ip4/127.0.0.1/tcp/36161/p2p/12D3KooWKFWB78Hka2uPVNYYoXfucWp6rDLsQzr5CFiP67NAo7YF
```
Both peers will print new records to the terminal as the log is updated. When you stop each peer using ctrl+c, the final state of the database will be printed to the terminal. They should match.
What is happening is that the second peer dials the first peer on the /ip4/ address and then opens the database.
**PLEASE NOTE:**
@ -296,3 +307,5 @@ These kinds of connectivity configurations are beyond the scope of OrbitDB. To f
[Databases](./DATABASES.md) covers database management and data entry in more detail.
[Replication](./REPLICATION.md) provides a comprehensive overview of how to perform data replication across multiple peers.
[Encryption](./ENCRYPTION.md) discusses database encryption using OrbitDB's modular architecture.


@ -1,4 +1,4 @@
## OrbitDB API - v2.4
## OrbitDB API - v3.0
OrbitDB is a serverless, distributed, peer-to-peer database. OrbitDB uses IPFS
as its data storage and Libp2p Pubsub to automatically sync databases with peers. It's an eventually consistent database that uses Merkle-CRDTs for conflict-free database writes and merges making OrbitDB an excellent choice for p2p and decentralized apps, blockchain applications and local first web applications.

package-lock.json (generated, 6324 lines)

File diff suppressed because it is too large.


@ -1,6 +1,6 @@
{
"name": "@orbitdb/core",
"version": "2.4.3",
"version": "3.0.2",
"description": "Distributed p2p database on IPFS",
"author": "Haad",
"license": "MIT",
@ -31,16 +31,17 @@
"devDependencies": {
"@chainsafe/libp2p-gossipsub": "^14.1.0",
"@libp2p/circuit-relay-v2": "^3.1.0",
"@orbitdb/simple-encryption": "^0.0.2",
"blockstore-level": "^2.0.1",
"c8": "^8.0.1",
"cross-env": "^7.0.3",
"fs-extra": "^11.2.0",
"helia": "^5.1.0",
"helia": "^5.3.0",
"it-all": "^3.0.4",
"jsdoc": "^4.0.2",
"mocha": "^10.2.0",
"path-browserify": "^1.0.1",
"playwright-test": "^14.1.6",
"playwright-test": "^14.1.9",
"rimraf": "^5.0.5",
"standard": "^17.1.0",
"webpack": "^5.89.0",


@ -7,7 +7,7 @@
import { EventEmitter } from 'events'
import PQueue from 'p-queue'
import Sync from './sync.js'
import { Log, Entry } from './oplog/index.js'
import { Log } from './oplog/index.js'
import { ComposedStorage, LRUStorage, IPFSBlockStorage, LevelStorage } from './storage/index.js'
import pathJoin from './utils/path-join.js'
@ -39,10 +39,12 @@ const defaultCacheSize = 1000
* automatically. Otherwise, false.
* @param {function} [params.onUpdate] A function callback. Fired when an
* entry is added to the oplog.
* @param {Object} [params.encryption] An optional encryption object with
* replication and data encryption modules.
* @return {module:Databases~Database} An instance of Database.
* @instance
*/
const Database = async ({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate }) => {
const Database = async ({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate, encryption }) => {
/**
* @namespace module:Databases~Database
* @description The instance returned by {@link module:Database~Database}.
@ -108,7 +110,9 @@ const Database = async ({ ipfs, identity, address, name, access, directory, meta
await LevelStorage({ path: pathJoin(directory, '/log/_index/') })
)
const log = await Log(identity, { logId: address, access, entryStorage, headsStorage, indexStorage })
encryption = encryption || {}
const log = await Log(identity, { logId: address, access, entryStorage, headsStorage, indexStorage, encryption })
const events = new EventEmitter()
@ -134,21 +138,23 @@ const Database = async ({ ipfs, identity, address, name, access, directory, meta
return entry.hash
}
const hash = await queue.add(task)
await queue.onIdle()
return hash
}
const applyOperation = async (bytes) => {
const applyOperation = async (entry) => {
const task = async () => {
const entry = await Entry.decode(bytes)
if (entry) {
const updated = await log.joinEntry(entry)
if (updated) {
if (onUpdate) {
await onUpdate(log, entry)
try {
if (entry) {
const updated = await log.joinEntry(entry)
if (updated) {
if (onUpdate) {
await onUpdate(log, entry)
}
events.emit('update', entry)
}
events.emit('update', entry)
}
} catch (e) {
console.error(e)
}
}
await queue.add(task)
@ -177,7 +183,7 @@ const Database = async ({ ipfs, identity, address, name, access, directory, meta
* @async
*/
const drop = async () => {
await queue.onIdle()
await queue.clear()
await log.clear()
if (access && access.drop) {
await access.drop()


@ -25,8 +25,8 @@ const DefaultOptions = { indexBy: '_id' }
* @return {module:Databases.Databases-Documents} A Documents function.
* @memberof module:Databases
*/
const Documents = ({ indexBy } = DefaultOptions) => async ({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate }) => {
const database = await Database({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically })
const Documents = ({ indexBy } = DefaultOptions) => async ({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate, encryption }) => {
const database = await Database({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, encryption })
const { addOperation, log } = database


@ -15,8 +15,8 @@ const type = 'events'
* @return {module:Databases.Databases-Events} A Events function.
* @memberof module:Databases
*/
const Events = () => async ({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate }) => {
const database = await Database({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate })
const Events = () => async ({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate, encryption }) => {
const database = await Database({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate, encryption })
const { addOperation, log } = database


@ -109,7 +109,7 @@ const Index = ({ directory } = {}) => async () => {
* function.
* @memberof module:Databases
*/
const KeyValueIndexed = () => async ({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate }) => {
const KeyValueIndexed = () => async ({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate, encryption }) => {
// Set up the directory for an index
directory = pathJoin(directory || './orbitdb', `./${address}/_index/`)
@ -117,7 +117,7 @@ const KeyValueIndexed = () => async ({ ipfs, identity, address, name, access, di
const index = await Index({ directory })()
// Set up the underlying KeyValue database
const keyValueStore = await KeyValue()({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate: index.update })
const keyValueStore = await KeyValue()({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate: index.update, encryption })
/**
* Gets a value from the store by key.


@ -15,8 +15,8 @@ const type = 'keyvalue'
* @return {module:Databases.Databases-KeyValue} A KeyValue function.
* @memberof module:Databases
*/
const KeyValue = () => async ({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate }) => {
const database = await Database({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate })
const KeyValue = () => async ({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate, encryption }) => {
const database = await Database({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate, encryption })
const { addOperation, log } = database


@ -55,7 +55,7 @@ const hashStringEncoding = base58btc
* // { payload: "hello", next: [], ... }
* @private
*/
const create = async (identity, id, payload, clock = null, next = [], refs = []) => {
const create = async (identity, id, payload, encryptPayloadFn, clock = null, next = [], refs = []) => {
if (identity == null) throw new Error('Identity is required, cannot create entry')
if (id == null) throw new Error('Entry requires an id')
if (payload == null) throw new Error('Entry requires a payload')
@ -63,9 +63,16 @@ const create = async (identity, id, payload, clock = null, next = [], refs = [])
clock = clock || Clock(identity.publicKey)
let encryptedPayload
if (encryptPayloadFn) {
const { bytes: encodedPayloadBytes } = await Block.encode({ value: payload, codec, hasher })
encryptedPayload = await encryptPayloadFn(encodedPayloadBytes)
}
const entry = {
id, // For determining a unique chain
payload, // Can be any dag-cbor encodeable data
payload: encryptedPayload || payload, // Can be any dag-cbor encodeable data
next, // Array of strings of CIDs
refs, // Array of strings of CIDs
clock, // Clock
@ -78,8 +85,13 @@ const create = async (identity, id, payload, clock = null, next = [], refs = [])
entry.key = identity.publicKey
entry.identity = identity.hash
entry.sig = signature
entry.payload = payload
return encode(entry)
if (encryptPayloadFn) {
entry._payload = encryptedPayload
}
return entry
}
/**
@ -97,13 +109,15 @@ const verify = async (identities, entry) => {
if (!entry.key) throw new Error("Entry doesn't have a key")
if (!entry.sig) throw new Error("Entry doesn't have a signature")
const e = Object.assign({}, entry)
const value = {
id: entry.id,
payload: entry.payload,
next: entry.next,
refs: entry.refs,
clock: entry.clock,
v: entry.v
id: e.id,
payload: e._payload || e.payload,
next: e.next,
refs: e.refs,
clock: e.clock,
v: e.v
}
const { bytes } = await Block.encode({ value, codec, hasher })
@ -136,7 +150,7 @@ const isEntry = (obj) => {
* @private
*/
const isEqual = (a, b) => {
return a && b && a.hash === b.hash
return a && b && a.hash && a.hash === b.hash
}
/**
@ -146,13 +160,39 @@ const isEqual = (a, b) => {
* @memberof module:Log~Entry
* @private
*/
const decode = async (bytes) => {
const { cid, value } = await Block.decode({ bytes, codec, hasher })
const decode = async (bytes, decryptEntryFn, decryptPayloadFn) => {
let cid
if (decryptEntryFn) {
try {
const encryptedEntry = await Block.decode({ bytes, codec, hasher })
bytes = await decryptEntryFn(encryptedEntry.value)
cid = encryptedEntry.cid
} catch (e) {
throw new Error('Could not decrypt entry')
}
}
const decodedEntry = await Block.decode({ bytes, codec, hasher })
const entry = decodedEntry.value
if (decryptPayloadFn) {
try {
const decryptedPayloadBytes = await decryptPayloadFn(entry.payload)
const { value: decryptedPayload } = await Block.decode({ bytes: decryptedPayloadBytes, codec, hasher })
entry._payload = entry.payload
entry.payload = decryptedPayload
} catch (e) {
throw new Error('Could not decrypt payload')
}
}
cid = cid || decodedEntry.cid
const hash = cid.toString(hashStringEncoding)
return {
...value,
hash,
bytes
...entry,
hash
}
}
@ -163,13 +203,28 @@ const decode = async (bytes) => {
* @memberof module:Log~Entry
* @private
*/
const encode = async (entry) => {
const { cid, bytes } = await Block.encode({ value: entry, codec, hasher })
const encode = async (entry, encryptEntryFn, encryptPayloadFn) => {
const e = Object.assign({}, entry)
if (encryptPayloadFn) {
e.payload = e._payload
}
delete e._payload
delete e.hash
let { cid, bytes } = await Block.encode({ value: e, codec, hasher })
if (encryptEntryFn) {
bytes = await encryptEntryFn(bytes)
const encryptedEntry = await Block.encode({ value: bytes, codec, hasher })
cid = encryptedEntry.cid
bytes = encryptedEntry.bytes
}
const hash = cid.toString(hashStringEncoding)
const clock = Clock(entry.clock.id, entry.clock.time)
return {
...entry,
clock,
hash,
bytes
}


@ -9,19 +9,17 @@ import MemoryStorage from '../storage/memory.js'
const DefaultStorage = MemoryStorage
const Heads = async ({ storage, heads }) => {
const Heads = async ({ storage, heads, decryptPayloadFn, decryptEntryFn }) => {
storage = storage || await DefaultStorage()
const encoder = new TextEncoder()
const decoder = new TextDecoder()
const put = async (heads) => {
heads = findHeads(heads)
for (const head of heads) {
await storage.put(head.hash, head.bytes)
}
}
const set = async (heads) => {
await storage.clear()
await put(heads)
const newHeads = heads.map(e => ({ hash: e.hash, next: e.next }))
const bytes = encoder.encode(JSON.stringify(newHeads))
await storage.put('heads', bytes)
}
const add = async (head) => {
@ -30,22 +28,21 @@ const Heads = async ({ storage, heads }) => {
return
}
const newHeads = findHeads([...currentHeads, head])
await set(newHeads)
await put(newHeads)
return newHeads
}
const remove = async (hash) => {
const currentHeads = await all()
const newHeads = currentHeads.filter(e => e.hash !== hash)
await set(newHeads)
await put(newHeads)
}
const iterator = async function * () {
const it = storage.iterator()
for await (const [, bytes] of it) {
const head = await Entry.decode(bytes)
yield head
const bytes = await storage.get('heads')
const headHashes = bytes ? JSON.parse(decoder.decode(bytes)) : []
for (const hash of headHashes) {
yield hash
}
}
@ -66,11 +63,13 @@ const Heads = async ({ storage, heads }) => {
}
// Initialize the heads if given as parameter
await put(heads || [])
if (heads) {
await put(heads)
}
return {
put,
set,
set: put,
add,
remove,
iterator,


@ -10,18 +10,14 @@ import LRU from 'lru'
import PQueue from 'p-queue'
import Entry from './entry.js'
import Clock, { tickClock } from './clock.js'
import Heads from './heads.js'
import ConflictResolution from './conflict-resolution.js'
import MemoryStorage from '../storage/memory.js'
import OplogStore from './oplog-store.js'
const { LastWriteWins, NoZeroes } = ConflictResolution
const randomId = () => new Date().getTime().toString()
const maxClockTimeReducer = (res, acc) => Math.max(res, acc.clock.time)
// Default storage for storing the Log and its entries. Default: Memory. Options: Memory, LRU, IPFS.
const DefaultStorage = MemoryStorage
// Default AccessController for the Log.
// Default policy is that anyone can write to the Log.
// Signature of an entry will always be verified regardless of AccessController policy.
@ -56,7 +52,7 @@ const DefaultAccessController = async () => {
* @memberof module:Log
* @instance
*/
const Log = async (identity, { logId, logHeads, access, entryStorage, headsStorage, indexStorage, sortFn } = {}) => {
const Log = async (identity, { logId, logHeads, access, entryStorage, headsStorage, indexStorage, sortFn, encryption } = {}) => {
/**
* @namespace Log
* @description The instance returned by {@link module:Log}
@ -68,20 +64,23 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
if (logHeads != null && !Array.isArray(logHeads)) {
throw new Error('\'logHeads\' argument must be an array')
}
// Set Log's id
const id = logId || randomId()
// Encryption of entries and payloads
encryption = encryption || {}
const encryptPayloadFn = encryption.data?.encrypt
// Access Controller
access = access || await DefaultAccessController()
// Oplog entry storage
const _entries = entryStorage || await DefaultStorage()
// Entry index for keeping track which entries are already in the log
const _index = indexStorage || await DefaultStorage()
// Heads storage
headsStorage = headsStorage || await DefaultStorage()
// Add heads to the state storage, ie. init the log state
const _heads = await Heads({ storage: headsStorage, heads: logHeads })
// Index and storage of entries for this Log
const oplogStore = await OplogStore({ logHeads, entryStorage, indexStorage, headsStorage, encryption })
// Conflict-resolution sorting function
sortFn = NoZeroes(sortFn || LastWriteWins)
// Internal queues for processing appends and joins in their call-order
const appendQueue = new PQueue({ concurrency: 1 })
const joinQueue = new PQueue({ concurrency: 1 })
@ -106,8 +105,8 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
* @instance
*/
const heads = async () => {
const res = await _heads.all()
return res.sort(sortFn).reverse()
const heads_ = await oplogStore.heads()
return heads_.sort(sortFn).reverse()
}
/**
@ -134,16 +133,14 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
* @instance
*/
const get = async (hash) => {
const bytes = await _entries.get(hash)
if (bytes) {
const entry = await Entry.decode(bytes)
return entry
if (!hash) {
throw new Error('hash is required')
}
return oplogStore.get(hash)
}
const has = async (hash) => {
const entry = await _index.get(hash)
return entry != null
return oplogStore.has(hash)
}
/**
@ -162,6 +159,7 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
// 2. Authorize entry
// 3. Store entry
// 4. return Entry
// Get current heads of the log
const heads_ = await heads()
// Create the next pointers from heads
@ -169,29 +167,29 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
// Get references (pointers) to multiple entries in the past
// (skips the heads which are covered by the next field)
const refs = await getReferences(heads_, options.referencesCount + heads_.length)
// Create the entry
const entry = await Entry.create(
identity,
id,
data,
encryptPayloadFn,
tickClock(await clock()),
nexts,
refs
)
// Authorize the entry
const canAppend = await access.canAppend(entry)
if (!canAppend) {
throw new Error(`Could not append entry:\nKey "${identity.hash}" is not allowed to write to the log`)
}
// The appended entry is now the latest head
await _heads.set([entry])
// Add entry to the entry storage
await _entries.put(entry.hash, entry.bytes)
// Add entry to the entry index
await _index.put(entry.hash, true)
// Add the entry to the oplog store (=store and index it)
const hash = await oplogStore.setHead(entry)
// Return the appended entry
return entry
return { ...entry, hash }
}
return appendQueue.add(task)
@ -218,9 +216,7 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
if (!isLog(log)) {
throw new Error('Given argument is not an instance of Log')
}
if (_entries.merge) {
await _entries.merge(log.storage)
}
await oplogStore.storage.merge(log.storage)
const heads = await log.heads()
for (const entry of heads) {
await joinEntry(entry)
@ -302,21 +298,12 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
await traverseAndVerify()
/* 4. Add missing entries to the index (=to the log) */
for (const hash of hashesToAdd.values()) {
await _index.put(hash, true)
}
/* 5. Remove heads which new entries are connect to */
for (const hash of connectedHeads.values()) {
await _heads.remove(hash)
}
/* 6. Add new entry to entries (for pinning) */
await _entries.put(entry.hash, entry.bytes)
/* 6. Add the new entry to heads (=union with current heads) */
await _heads.add(entry)
/* 4. Add missing entries to the oplog store (=to the log) */
await oplogStore.addVerified(hashesToAdd.values())
/* 6. Remove heads which new entries are connected to */
await oplogStore.removeHeads(connectedHeads.values())
/* 7. Add the new entry to heads (=union with current heads) */
await oplogStore.addHead(entry)
return true
}
@ -501,9 +488,9 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
* @instance
*/
const clear = async () => {
await _index.clear()
await _heads.clear()
await _entries.clear()
await appendQueue.clear()
await joinQueue.clear()
await oplogStore.clear()
}
/**
@ -512,9 +499,9 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
* @instance
*/
const close = async () => {
await _index.close()
await _heads.close()
await _entries.close()
await appendQueue.onIdle()
await joinQueue.onIdle()
await oplogStore.close()
}
/**
@ -570,7 +557,8 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
close,
access,
identity,
storage: _entries
storage: oplogStore.storage,
encryption
}
}

src/oplog/oplog-store.js (new file, 112 lines)

@ -0,0 +1,112 @@
import Entry from './entry.js'
import Heads from './heads.js'
import MemoryStorage from '../storage/memory.js'
// Default storage for storing the Log and its entries. Default: Memory. Options: Memory, LRU, IPFS.
const DefaultStorage = MemoryStorage
const OplogStore = async ({ logHeads, entryStorage, headsStorage, indexStorage, encryption }) => {
// Setup encryption and decryption functions
const encryptEntryFn = encryption?.replication?.encrypt
const decryptEntryFn = encryption?.replication?.decrypt
const encryptPayloadFn = encryption?.data?.encrypt
const decryptPayloadFn = encryption?.data?.decrypt
// Oplog entry storage
const _entries = entryStorage || await DefaultStorage()
// Entry index for keeping track which entries are already in the log
const _index = indexStorage || await DefaultStorage()
// Heads storage
headsStorage = headsStorage || await DefaultStorage()
// Add heads to the state storage, ie. init the log state
const _heads = await Heads({ storage: headsStorage, heads: logHeads, decryptPayloadFn, decryptEntryFn })
const get = async (hash) => {
const bytes = await _entries.get(hash)
if (bytes) {
const entry = await Entry.decode(bytes, decryptEntryFn, decryptPayloadFn)
return entry
}
}
const getBytes = async (hash) => {
return _entries.get(hash)
}
const has = async (hash) => {
const entry = await _index.get(hash)
return entry != null
}
const heads = async () => {
const heads_ = []
for (const { hash } of await _heads.all()) {
const head = await get(hash)
heads_.push(head)
}
return heads_
}
const setHead = async (entry) => {
const { hash, bytes } = await Entry.encode(entry, encryptEntryFn, encryptPayloadFn)
// Add entry to the entry storage
await _entries.put(hash, bytes)
// Add entry to the entry index
await _index.put(hash, true)
// The appended entry is now the latest head
await _heads.set([{ hash, next: entry.next }])
return hash
}
const addHead = async (entry) => {
/* 7. Add the new entry to heads (=union with current heads) */
await _heads.add(entry)
return entry.hash
}
const removeHeads = async (hashes) => {
/* 5. Remove heads which new entries are connected to */
for (const hash of hashes) {
await _heads.remove(hash)
}
}
const addVerified = async (hashes) => {
/* 4. Add missing entries to the index (=to the log) */
for (const hash of hashes) {
await _index.put(hash, true)
/* 5. Add new entry to entries (for pinning) */
if (_entries.persist) {
await _entries.persist(hash)
}
}
}
const clear = async () => {
await _index.clear()
await _heads.clear()
await _entries.clear()
}
const close = async () => {
await _index.close()
await _heads.close()
await _entries.close()
}
return {
get,
getBytes,
has,
heads,
setHead,
addHead,
removeHeads,
addVerified,
storage: _entries,
clear,
close
}
}
export default OplogStore


@ -104,6 +104,7 @@ const OrbitDB = async ({ ipfs, id, identity, identities, directory } = {}) => {
* @param {module:Storage} [params.indexStorage=[ComposedStorage]{@link module:Storage.Storage-Composed}] A compatible storage instance for storing an index of log entries. Defaults to ComposedStorage(LRUStorage, LevelStorage).
* @param {number} [params.referencesCount] The number of references to
* use for [Log]{@link module:Log} entries.
* @param {Object} [params.encryption] Encryption module to encrypt/decrypt database payloads and entries. If provided, the encryption object must take the form { replication: { encrypt, decrypt }, data: { encrypt, decrypt } }.
* @memberof module:OrbitDB
* @return {module:Database} A database instance.
* @throws "Unsupported database type" if the type specified is not in the list
@ -112,7 +113,7 @@ const OrbitDB = async ({ ipfs, id, identity, identities, directory } = {}) => {
* @instance
* @async
*/
const open = async (address, { type, meta, sync, Database, AccessController, headsStorage, entryStorage, indexStorage, referencesCount } = {}) => {
const open = async (address, { type, meta, sync, Database, AccessController, headsStorage, entryStorage, indexStorage, referencesCount, encryption } = {}) => {
let name, manifest, accessController
if (databases[address]) {
@ -153,7 +154,7 @@ const OrbitDB = async ({ ipfs, id, identity, identities, directory } = {}) => {
address = address.toString()
const db = await Database({ ipfs, identity, address, name, access: accessController, directory, meta, syncAutomatically: sync, headsStorage, entryStorage, indexStorage, referencesCount })
const db = await Database({ ipfs, identity, address, name, access: accessController, directory, meta, syncAutomatically: sync, headsStorage, entryStorage, indexStorage, referencesCount, encryption })
db.events.on('close', onDatabaseClosed(address))


@ -114,6 +114,16 @@ const ComposedStorage = async (storage1, storage2) => {
await storage2.clear()
}
const persist = async (hash) => {
if (storage1.persist) {
await storage1.persist(hash)
}
if (storage2.persist) {
await storage2.persist(hash)
}
}
/**
* Calls close on each of the composed storages.
* @function
@ -129,6 +139,7 @@ const ComposedStorage = async (storage1, storage2) => {
put,
get,
del,
persist,
iterator,
merge,
clear,


@ -28,6 +28,8 @@ const DefaultTimeout = 30000 // 30 seconds
const IPFSBlockStorage = async ({ ipfs, pin, timeout } = {}) => {
if (!ipfs) throw new Error('An instance of ipfs is required.')
const timeoutControllers = new Set()
/**
* Puts data to an IPFS block.
* @function
@ -41,9 +43,7 @@ const IPFSBlockStorage = async ({ ipfs, pin, timeout } = {}) => {
const { signal } = new TimeoutController(timeout || DefaultTimeout)
await ipfs.blockstore.put(cid, data, { signal })
if (pin && !(await ipfs.pins.isPinned(cid))) {
await drain(ipfs.pins.add(cid))
}
await persist(hash)
}
const del = async (hash) => {}
@ -58,25 +58,40 @@ const IPFSBlockStorage = async ({ ipfs, pin, timeout } = {}) => {
*/
const get = async (hash) => {
const cid = CID.parse(hash, base58btc)
const { signal } = new TimeoutController(timeout || DefaultTimeout)
const block = await ipfs.blockstore.get(cid, { signal })
const controller = new TimeoutController(timeout || DefaultTimeout)
timeoutControllers.add(controller)
const block = await ipfs.blockstore.get(cid, { signal: controller.signal })
timeoutControllers.delete(controller)
if (block) {
return block
}
}
const persist = async (hash) => {
const cid = CID.parse(hash, base58btc)
if (pin && !(await ipfs.pins.isPinned(cid))) {
await drain(ipfs.pins.add(cid))
}
}
const iterator = async function * () {}
const merge = async (other) => {}
const clear = async () => {}
const close = async () => {}
const close = async () => {
for (const controller of timeoutControllers) {
controller.abort()
}
timeoutControllers.clear()
}
return {
put,
del,
get,
persist,
iterator,
merge,
clear,


@ -3,6 +3,7 @@ import PQueue from 'p-queue'
import { EventEmitter } from 'events'
import { TimeoutController } from 'timeout-abort-controller'
import pathJoin from './utils/path-join.js'
import { Entry } from './oplog/index.js'
const DefaultTimeout = 30000 // 30 seconds
@ -134,7 +135,7 @@ const Sync = async ({ ipfs, log, events, onSynced, start, timeout }) => {
*/
events = events || new EventEmitter()
timeout = timeout || DefaultTimeout
timeout ??= DefaultTimeout
let started = false
@ -146,7 +147,8 @@ const Sync = async ({ ipfs, log, events, onSynced, start, timeout }) => {
const sendHeads = (source) => {
return (async function * () {
const heads = await log.heads()
for await (const { bytes } of heads) {
for await (const { hash } of heads) {
const bytes = await log.storage.get(hash)
yield bytes
}
})()
@ -156,7 +158,8 @@ const Sync = async ({ ipfs, log, events, onSynced, start, timeout }) => {
for await (const value of source) {
const headBytes = value.subarray()
if (headBytes && onSynced) {
await onSynced(headBytes)
const entry = await Entry.decode(headBytes, log.encryption.replication?.decrypt, log.encryption.data?.decrypt)
await onSynced(entry)
}
}
if (started) {
@ -194,9 +197,8 @@ const Sync = async ({ ipfs, log, events, onSynced, start, timeout }) => {
const stream = await libp2p.dialProtocol(remotePeer, headsSyncAddress, { signal })
await pipe(sendHeads, stream, receiveHeads(peerId))
} catch (e) {
console.error(e)
peers.delete(peerId)
if (e.code === 'ERR_UNSUPPORTED_PROTOCOL') {
if (e.name === 'UnsupportedProtocolError') {
// Skip peer, they don't have this database currently
} else {
events.emit('error', e)
@ -220,7 +222,8 @@ const Sync = async ({ ipfs, log, events, onSynced, start, timeout }) => {
const task = async () => {
try {
if (data && onSynced) {
await onSynced(data)
const entry = await Entry.decode(data, log.encryption.replication?.decrypt, log.encryption.data?.decrypt)
await onSynced(entry)
}
} catch (e) {
events.emit('error', e)
@ -232,6 +235,10 @@ const Sync = async ({ ipfs, log, events, onSynced, start, timeout }) => {
}
}
const handlePeerDisconnected = async event => {
peers.delete(event.detail.toString())
}
/**
* Add a log entry to the Sync Protocol to be sent to peers.
* @function add
@ -240,8 +247,9 @@ const Sync = async ({ ipfs, log, events, onSynced, start, timeout }) => {
* @instance
*/
const add = async (entry) => {
if (started) {
await pubsub.publish(address, entry.bytes)
if (started && entry && entry.hash) {
const bytes = await log.storage.get(entry.hash)
await pubsub.publish(address, bytes)
}
}
@ -254,11 +262,12 @@ const Sync = async ({ ipfs, log, events, onSynced, start, timeout }) => {
const stopSync = async () => {
if (started) {
started = false
await queue.onIdle()
await queue.clear()
pubsub.removeEventListener('subscription-change', handlePeerSubscribed)
pubsub.removeEventListener('message', handleUpdateMessage)
await libp2p.unhandle(headsSyncAddress)
await pubsub.unsubscribe(address)
libp2p.removeEventListener('peer:disconnect', handlePeerDisconnected)
peers.clear()
}
}
@ -271,12 +280,14 @@ const Sync = async ({ ipfs, log, events, onSynced, start, timeout }) => {
*/
const startSync = async () => {
if (!started) {
// Exchange head entries with peers when connected
await libp2p.handle(headsSyncAddress, handleReceiveHeads)
pubsub.addEventListener('subscription-change', handlePeerSubscribed)
pubsub.addEventListener('message', handleUpdateMessage)
// Subscribe to the pubsub channel for this database through which updates are sent
await pubsub.subscribe(address)
// Exchange head entries with peers when connected
await libp2p.handle(headsSyncAddress, handleReceiveHeads)
// Remove disconnected peers from `peers`, as otherwise they will not resync heads on reconnection
libp2p.addEventListener('peer:disconnect', handlePeerDisconnected)
started = true
}
}


@ -1,7 +0,0 @@
# https://tea.xyz/what-is-this-file
---
version: 1.0.0
codeOwners:
- '0xe97f30348D02869eC312B85d85F235BBF2Fd925f'
- '0x5c7ec6be1f3EA9372de9a39a81bb0628C9A11E81'
quorum: 1


@ -196,27 +196,20 @@ describe('Database - Replication', function () {
db1 = await Database({ ipfs: ipfs1, identity: testIdentity1, address: databaseId, accessController, directory: './orbitdb1', entryStorage: storage1 })
db2 = await Database({ ipfs: ipfs2, identity: testIdentity2, address: databaseId, accessController, directory: './orbitdb2', entryStorage: storage2 })
let connected1 = false
let connected2 = false
let connected = false
const onConnected1 = (peerId, heads) => {
connected1 = true
const onConnected = (peerId, heads) => {
connected = true
}
const onConnected2 = (peerId, heads) => {
connected2 = true
}
db1.events.on('join', onConnected1)
db2.events.on('join', onConnected2)
db2.events.on('join', onConnected)
await db1.addOperation({ op: 'PUT', key: 1, value: 'record 1 on db 1' })
await db1.addOperation({ op: 'PUT', key: 2, value: 'record 2 on db 1' })
await db1.addOperation({ op: 'PUT', key: 3, value: 'record 3 on db 1' })
await db1.addOperation({ op: 'PUT', key: 4, value: 'record 4 on db 1' })
await waitFor(() => connected1, () => true)
await waitFor(() => connected2, () => true)
await waitFor(() => connected, () => true)
const all1 = []
for await (const item of db1.log.iterator()) {


@ -3,7 +3,7 @@ import { rimraf } from 'rimraf'
import { existsSync } from 'fs'
import { copy } from 'fs-extra'
import Path from 'path'
import { Database, Entry, KeyStore, Identities } from '../src/index.js'
import { Database, KeyStore, Identities } from '../src/index.js'
import LevelStorage from '../src/storage/level.js'
import MemoryStorage from '../src/storage/memory.js'
import testKeysPath from './fixtures/test-keys-path.js'
@ -68,8 +68,13 @@ describe('Database', function () {
describe('Options', () => {
it('uses default directory for headsStorage', async () => {
db = await Database({ ipfs, identity: testIdentity, address: databaseId, accessController })
const op = { op: 'PUT', key: 1, value: 'record 1 on db 1' }
const hash = await db.addOperation(op)
const op1 = { op: 'PUT', key: 1, value: 'record 1 on db 1 version 1' }
const op2 = { op: 'PUT', key: 1, value: 'record 1 on db 1 version 2' }
await db.addOperation(op1)
const hash = await db.addOperation(op2)
const entry = await db.log.get(hash)
const headsPath = Path.join('./orbitdb/', `${databaseId}/`, '/log/_heads/')
@ -78,8 +83,13 @@ describe('Database', function () {
await db.close()
const headsStorage = await LevelStorage({ path: headsPath })
const bytes = await headsStorage.get('heads')
const heads = JSON.parse(new TextDecoder().decode(bytes))
deepStrictEqual((await Entry.decode(await headsStorage.get(hash))).payload, op)
strictEqual(heads.length, 1)
strictEqual(heads.at(0).hash, hash)
strictEqual(heads.at(0).next.length, 1)
strictEqual(heads.at(0).next.at(0), entry.next.at(0))
await headsStorage.close()
@ -88,8 +98,12 @@ describe('Database', function () {
it('uses given directory for headsStorage', async () => {
db = await Database({ ipfs, identity: testIdentity, address: databaseId, accessController, directory: './custom-directory' })
const op = { op: 'PUT', key: 1, value: 'record 1 on db 1' }
const hash = await db.addOperation(op)
const op1 = { op: 'PUT', key: 1, value: 'record 1 on db 1 version 1' }
const op2 = { op: 'PUT', key: 1, value: 'record 1 on db 1 version 2' }
await db.addOperation(op1)
const hash = await db.addOperation(op2)
const entry = await db.log.get(hash)
const headsPath = Path.join('./custom-directory/', `${databaseId}/`, '/log/_heads/')
@ -99,7 +113,13 @@ describe('Database', function () {
const headsStorage = await LevelStorage({ path: headsPath })
deepStrictEqual((await Entry.decode(await headsStorage.get(hash))).payload, op)
const bytes = await headsStorage.get('heads')
const heads = JSON.parse(new TextDecoder().decode(bytes))
strictEqual(heads.length, 1)
strictEqual(heads.at(0).hash, hash)
strictEqual(heads.at(0).next.length, 1)
strictEqual(heads.at(0).next.at(0), entry.next.at(0))
await headsStorage.close()
@ -110,23 +130,51 @@ describe('Database', function () {
it('uses given MemoryStorage for headsStorage', async () => {
const headsStorage = await MemoryStorage()
db = await Database({ ipfs, identity: testIdentity, address: databaseId, accessController, directory: './orbitdb', headsStorage })
const op = { op: 'PUT', key: 1, value: 'record 1 on db 1' }
const hash = await db.addOperation(op)
const op1 = { op: 'PUT', key: 1, value: 'record 1 on db 1 version 1' }
const op2 = { op: 'PUT', key: 1, value: 'record 1 on db 1 version 2' }
deepStrictEqual((await Entry.decode(await headsStorage.get(hash))).payload, op)
await db.addOperation(op1)
const hash = await db.addOperation(op2)
const entry = await db.log.get(hash)
const bytes = await headsStorage.get('heads')
const heads = JSON.parse(new TextDecoder().decode(bytes))
strictEqual(heads.length, 1)
strictEqual(heads.at(0).hash, hash)
strictEqual(heads.at(0).next.length, 1)
strictEqual(heads.at(0).next.at(0), entry.next.at(0))
await db.close()
await headsStorage.close()
await rimraf('./orbitdb')
})
it('uses given MemoryStorage for entryStorage', async () => {
const entryStorage = await MemoryStorage()
db = await Database({ ipfs, identity: testIdentity, address: databaseId, accessController, directory: './orbitdb', entryStorage })
const op = { op: 'PUT', key: 1, value: 'record 1 on db 1' }
const hash = await db.addOperation(op)
const headsStorage = await MemoryStorage()
db = await Database({ ipfs, identity: testIdentity, address: databaseId, accessController, directory: './orbitdb', headsStorage, entryStorage })
const op1 = { op: 'PUT', key: 1, value: 'record 1 on db 1 version 1' }
const op2 = { op: 'PUT', key: 1, value: 'record 1 on db 1 version 2' }
deepStrictEqual((await Entry.decode(await entryStorage.get(hash))).payload, op)
await db.addOperation(op1)
const hash = await db.addOperation(op2)
const entry = await db.log.get(hash)
const bytes = await headsStorage.get('heads')
const heads = JSON.parse(new TextDecoder().decode(bytes))
strictEqual(heads.length, 1)
strictEqual(heads.at(0).hash, hash)
strictEqual(heads.at(0).next.length, 1)
strictEqual(heads.at(0).next.at(0), entry.next.at(0))
await db.close()
await entryStorage.close()
await headsStorage.close()
await rimraf('./orbitdb')
})
})


@ -320,11 +320,12 @@ describe('KeyValueIndexed Database Replication', function () {
})
it('indexes deletes correctly', async () => {
const databaseId = 'kv-CCC'
let replicated = false
let err
const onError = (err) => {
console.error(err)
deepStrictEqual(err, undefined)
const onError = (error) => {
err = error
}
kv1 = await KeyValueIndexed()({ ipfs: ipfs1, identity: testIdentity1, address: databaseId, accessController, directory: './orbitdb11' })
@ -339,11 +340,11 @@ describe('KeyValueIndexed Database Replication', function () {
kv2 = await KeyValueIndexed()({ ipfs: ipfs2, identity: testIdentity2, address: databaseId, accessController, directory: './orbitdb22' })
const onUpdate = (entry) => {
const onConnected = (entry) => {
replicated = true
}
kv2.events.on('update', onUpdate)
kv2.events.on('join', onConnected)
kv2.events.on('error', onError)
await waitFor(() => replicated, () => true)
@ -358,6 +359,8 @@ describe('KeyValueIndexed Database Replication', function () {
all2.push(keyValue)
}
deepStrictEqual(err, undefined)
deepStrictEqual(all2.map(e => { return { key: e.key, value: e.value } }), [
{ key: 'init', value: true },
{ key: 'hello', value: 'friend' }


@ -33,7 +33,8 @@ describe('Entry', function () {
it('creates an empty entry', async () => {
const expectedHash = 'zdpuAsKzwUEa8cz9pkJxxFMxLuP3cutA9PDGoLZytrg4RSVEa'
const entry = await create(testIdentity, 'A', 'hello')
strictEqual(entry.hash, expectedHash)
const { hash } = await Entry.encode(entry)
strictEqual(hash, expectedHash)
strictEqual(entry.id, 'A')
strictEqual(entry.clock.id, testIdentity.publicKey)
strictEqual(entry.clock.time, 0)
@ -47,7 +48,8 @@ describe('Entry', function () {
const expectedHash = 'zdpuAmthfqpHRQjdSpKN5etr1GrreJb7QcU1Hshm6pERnzsxi'
const payload = 'hello world'
const entry = await create(testIdentity, 'A', payload)
strictEqual(entry.hash, expectedHash)
const { hash } = await Entry.encode(entry)
strictEqual(hash, expectedHash)
strictEqual(entry.payload, payload)
strictEqual(entry.id, 'A')
strictEqual(entry.clock.id, testIdentity.publicKey)
@ -81,7 +83,7 @@ describe('Entry', function () {
const payload2 = 'hello again'
const entry1 = await create(testIdentity, 'A', payload1)
entry1.clock = tickClock(entry1.clock)
const entry2 = await create(testIdentity, 'A', payload2, entry1.clock, [entry1])
const entry2 = await create(testIdentity, 'A', payload2, null, entry1.clock, [entry1])
strictEqual(entry2.payload, payload2)
strictEqual(entry2.next.length, 1)
// strictEqual(entry2.hash, expectedHash)
@ -91,7 +93,8 @@ describe('Entry', function () {
it('`next` parameter can be an array of strings', async () => {
const entry1 = await create(testIdentity, 'A', 'hello1')
const entry2 = await create(testIdentity, 'A', 'hello2', null, [entry1.hash])
const { hash } = await Entry.encode(entry1)
const entry2 = await create(testIdentity, 'A', 'hello2', null, null, [hash])
strictEqual(typeof entry2.next[0] === 'string', true)
})
@ -138,7 +141,7 @@ describe('Entry', function () {
it('throws an error if next is not an array', async () => {
let err
try {
await create(testIdentity, 'A', 'hello', null, {})
await create(testIdentity, 'A', 'hello', null, null, {})
} catch (e) {
err = e
}


@ -1,7 +1,7 @@
import { strictEqual, notStrictEqual, deepStrictEqual } from 'assert'
import { rimraf } from 'rimraf'
import { copy } from 'fs-extra'
import { Log, Entry, Identities, KeyStore } from '../../src/index.js'
import { Log, Identities, KeyStore } from '../../src/index.js'
import { Clock } from '../../src/oplog/log.js'
import { MemoryStorage } from '../../src/storage/index.js'
import testKeysPath from '../fixtures/test-keys-path.js'
@ -760,7 +760,7 @@ describe('Log - Join', async function () {
await log1.storage.merge(log0.storage)
await headsStorage1.put(e0.hash, e0.bytes)
await headsStorage1.put('heads', new TextEncoder().encode(JSON.stringify([{ hash: e0.hash, next: e0.next }])))
await log1.append('hello1')
await log1.append('hello2')
@@ -863,7 +863,7 @@ describe('Log - Join', async function () {
})
describe('throws an error if verification of an entry in given entry\'s history fails', async () => {
- let e1, e3
+ let e1
let headsStorage1, headsStorage2
before(async () => {
@@ -875,23 +875,19 @@ describe('Log - Join', async function () {
e1 = await log1.append('hello1')
await log1.append('hello2')
- e3 = await log1.append('hello3')
+ await log1.append('hello3')
})
it('throws an error if an entry doesn\'t have a payload field', async () => {
const e = Object.assign({}, e1)
delete e.payload
+ delete e.bytes
+ delete e.hash
+ const ee = await Entry.encode(e)
+ await headsStorage1.put(e1.hash, ee.bytes)
+ await headsStorage1.put('heads', JSON.stringify([{ hash: e1.hash, next: e1.next }]))
await log2.storage.merge(headsStorage1)
let err
try {
- await log2.joinEntry(e3)
+ await log2.joinEntry(e)
} catch (e) {
err = e
}
@@ -906,16 +902,12 @@ describe('Log - Join', async function () {
const e = Object.assign({}, e1)
delete e.key
+ delete e.bytes
+ delete e.hash
+ const ee = await Entry.encode(e)
+ await headsStorage1.put(e1.hash, ee.bytes)
+ await headsStorage1.put('heads', JSON.stringify([{ hash: e1.hash, next: e1.next }]))
await log2.storage.merge(headsStorage1)
let err
try {
- await log2.joinEntry(e3)
+ await log2.joinEntry(e)
} catch (e) {
err = e
}
@@ -930,16 +922,12 @@ describe('Log - Join', async function () {
const e = Object.assign({}, e1)
delete e.sig
+ delete e.bytes
+ delete e.hash
+ const ee = await Entry.encode(e)
+ await headsStorage1.put(e1.hash, ee.bytes)
+ await headsStorage1.put('heads', JSON.stringify([{ hash: e1.hash, next: e1.next }]))
await log2.storage.merge(headsStorage1)
let err
try {
- await log2.joinEntry(e3)
+ await log2.joinEntry(e)
} catch (e) {
err = e
}
@@ -953,22 +941,19 @@ describe('Log - Join', async function () {
it('throws an error if an entry signature doesn\'t verify', async () => {
const e = Object.assign({}, e1)
e.sig = '1234567890'
+ delete e.bytes
+ delete e.hash
+ const ee = await Entry.encode(e)
+ await headsStorage1.put(e1.hash, ee.bytes)
+ await headsStorage1.put('heads', JSON.stringify([{ hash: e1.hash, next: e1.next }]))
await log2.storage.merge(headsStorage1)
let err
try {
- await log2.joinEntry(e3)
+ await log2.joinEntry(e)
} catch (e) {
err = e
}
notStrictEqual(err, undefined)
- strictEqual(err.message, 'Could not validate signature for entry "zdpuAvkAJ8C46cnGdtFpcBratA5MqK7CcjqCJjjmuKuFvZir3"')
+ strictEqual(err.message, 'Could not validate signature for entry "zdpuAxyE4ScWLf4X6VvkhMrpDQvwdvQno1DhzY5p1U3GPHrBT"')
deepStrictEqual(await log2.all(), [])
deepStrictEqual(await log2.heads(), [])
})

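The join-test hunks above all follow the same tamper-and-re-encode pattern: because entries no longer keep bytes and hash fields, a test that wants to plant a corrupted entry must drop the stale fields, re-encode with Entry.encode(), and also write the 'heads' index key that the refactored heads storage reads. A sketch of the pattern (names taken from the tests; the exact 'heads' key layout is inferred from these hunks):

const tampered = Object.assign({}, e1)
delete tampered.payload // corrupt the copied entry
delete tampered.bytes   // drop any stale encoding
delete tampered.hash
const { bytes } = await Entry.encode(tampered)
await headsStorage1.put(e1.hash, bytes)
await headsStorage1.put('heads', JSON.stringify([{ hash: e1.hash, next: e1.next }]))
await log2.storage.merge(headsStorage1)
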
View File

@@ -60,15 +60,21 @@ describe('Log', function () {
})
it('sets one head if multiple are given as params', async () => {
- const one = await create(testIdentity, 'A', 'entryA', null, [])
- const two = await create(testIdentity, 'A', 'entryB', null, [one.hash])
- const three = await create(testIdentity, 'A', 'entryC', null, [two.hash])
- const four = await create(testIdentity, 'A', 'entryD', null, [two.hash])
+ const one = await create(testIdentity, 'A', 'entryA', null, null, [])
+ const { hash: hash1, bytes: bytes1 } = await Entry.encode(one)
+ const two = await create(testIdentity, 'A', 'entryB', null, null, [hash1])
+ const { hash: hash2, bytes: bytes2 } = await Entry.encode(two)
+ const three = await create(testIdentity, 'A', 'entryC', null, null, [hash2])
+ const { hash: hash3, bytes: bytes3 } = await Entry.encode(three)
+ const four = await create(testIdentity, 'A', 'entryD', null, null, [hash3])
+ const { hash: hash4, bytes: bytes4 } = await Entry.encode(four)
const entryStorage = await MemoryStorage()
- await entryStorage.put(one.hash, one.bytes)
- await entryStorage.put(two.hash, two.bytes)
- await entryStorage.put(three.hash, three.bytes)
- await entryStorage.put(four.hash, four.bytes)
+ await entryStorage.put(hash1, bytes1)
+ await entryStorage.put(hash2, bytes2)
+ await entryStorage.put(hash3, bytes3)
+ await entryStorage.put(hash4, bytes4)
+ three.hash = hash3
+ two.hash = hash2
const log = await Log(testIdentity, { logId: 'A', logHeads: [three, three, two, two], entryStorage })
const values = await log.values()
const heads = await log.heads()
@@ -78,15 +84,22 @@ describe('Log', function () {
})
it('sets two heads if two given as params', async () => {
- const one = await create(testIdentity, 'A', 'entryA', null, [])
- const two = await create(testIdentity, 'A', 'entryB', null, [one.hash])
- const three = await create(testIdentity, 'A', 'entryC', null, [two.hash])
- const four = await create(testIdentity, 'A', 'entryD', null, [two.hash])
+ const one = await create(testIdentity, 'A', 'entryA', null, null, [])
+ const { hash: hash1, bytes: bytes1 } = await Entry.encode(one)
+ const two = await create(testIdentity, 'A', 'entryB', null, null, [hash1])
+ const { hash: hash2, bytes: bytes2 } = await Entry.encode(two)
+ const three = await create(testIdentity, 'A', 'entryC', null, null, [hash2])
+ const { hash: hash3, bytes: bytes3 } = await Entry.encode(three)
+ const four = await create(testIdentity, 'A', 'entryD', null, null, [hash2])
+ const { hash: hash4, bytes: bytes4 } = await Entry.encode(four)
const entryStorage = await MemoryStorage()
- await entryStorage.put(one.hash, one.bytes)
- await entryStorage.put(two.hash, two.bytes)
- await entryStorage.put(three.hash, three.bytes)
- await entryStorage.put(four.hash, four.bytes)
+ await entryStorage.put(hash1, bytes1)
+ await entryStorage.put(hash2, bytes2)
+ await entryStorage.put(hash3, bytes3)
+ await entryStorage.put(hash4, bytes4)
+ three.hash = hash3
+ four.hash = hash4
+ two.hash = hash2
const log = await Log(testIdentity, { logId: 'A', logHeads: [three, four, two], entryStorage })
const values = await log.values()
const heads = await log.heads()

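Since create() no longer attaches hash or bytes, these tests pair every create() with Entry.encode(), persist the bytes themselves, and assign the hash back before using an entry as a head. A hypothetical helper (not in the codebase) that compresses the repeated pattern:

const createAndStore = async (identity, id, payload, next, storage) => {
  const entry = await create(identity, id, payload, null, null, next)
  const { hash, bytes } = await Entry.encode(entry)
  await storage.put(hash, bytes) // persist the encoded entry
  entry.hash = hash              // hash is assigned back, as the tests do
  return entry
}
const one = await createAndStore(testIdentity, 'A', 'entryA', [], entryStorage)
const two = await createAndStore(testIdentity, 'A', 'entryB', [one.hash], entryStorage)
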
View File

@@ -69,7 +69,7 @@ describe('Log - Replication', function () {
try {
if (!messageIsFromMe(message)) {
const entry = await Entry.decode(message.detail.data)
- await storage1.put(entry.hash, entry.bytes)
+ await storage1.put(entry.hash, message.detail.data)
await log1.joinEntry(entry)
}
} catch (e) {
@@ -83,7 +83,7 @@ describe('Log - Replication', function () {
try {
if (!messageIsFromMe(message)) {
const entry = await Entry.decode(message.detail.data)
- await storage2.put(entry.hash, entry.bytes)
+ await storage2.put(entry.hash, message.detail.data)
await log2.joinEntry(entry)
}
} catch (e) {
@@ -114,8 +114,10 @@ describe('Log - Replication', function () {
for (let i = 1; i <= amount; i++) {
const entry1 = await input1.append('A' + i)
const entry2 = await input2.append('B' + i)
- await ipfs1.libp2p.services.pubsub.publish(logId, entry1.bytes)
- await ipfs2.libp2p.services.pubsub.publish(logId, entry2.bytes)
+ const bytes1 = await input1.storage.get(entry1.hash)
+ const bytes2 = await input1.storage.get(entry2.hash)
+ await ipfs1.libp2p.services.pubsub.publish(logId, bytes1)
+ await ipfs2.libp2p.services.pubsub.publish(logId, bytes2)
}
console.log('Messages sent')

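Decoded entries no longer expose a bytes field, so this test publishes the encoded bytes fetched from log storage and, on receipt, stores the incoming message bytes verbatim instead of re-serializing the decoded entry. A sketch of the receive side under the same assumptions as the test (a libp2p pubsub 'message' event whose detail.data holds the encoded entry):

const onMessage = async (message) => {
  const bytes = message.detail.data
  const entry = await Entry.decode(bytes) // decode only to verify and join
  await storage.put(entry.hash, bytes)    // persist the original bytes as-is
  await log.joinEntry(entry)
}
ipfs.libp2p.services.pubsub.addEventListener('message', onMessage)
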
View File

@@ -0,0 +1,376 @@
import { strictEqual, notEqual } from 'assert'
import { rimraf } from 'rimraf'
import path from 'path'
import { createOrbitDB } from '../src/index.js'
import connectPeers from './utils/connect-nodes.js'
import waitFor from './utils/wait-for.js'
import createHelia from './utils/create-helia.js'
import * as Block from 'multiformats/block'
import * as dagCbor from '@ipld/dag-cbor'
import { sha256 } from 'multiformats/hashes/sha2'
import SimpleEncryption from '@orbitdb/simple-encryption'
const codec = dagCbor
const hasher = sha256
const dbPath = './orbitdb/tests/write-permissions'
describe('Encryption', function () {
this.timeout(5000)
let ipfs1, ipfs2
let orbitdb1, orbitdb2
let db1, db2
let replicationEncryption
let dataEncryption
before(async () => {
[ipfs1, ipfs2] = await Promise.all([createHelia(), createHelia()])
await connectPeers(ipfs1, ipfs2)
await rimraf('./orbitdb')
orbitdb1 = await createOrbitDB({ ipfs: ipfs1, id: 'user1', directory: path.join(dbPath, '1') })
orbitdb2 = await createOrbitDB({ ipfs: ipfs2, id: 'user2', directory: path.join(dbPath, '2') })
replicationEncryption = await SimpleEncryption({ password: 'hello' })
dataEncryption = await SimpleEncryption({ password: 'world' })
})
after(async () => {
if (orbitdb1) {
await orbitdb1.stop()
}
if (orbitdb2) {
await orbitdb2.stop()
}
if (ipfs1) {
await ipfs1.stop()
}
if (ipfs2) {
await ipfs2.stop()
}
await rimraf('./orbitdb')
await rimraf('./ipfs1')
await rimraf('./ipfs2')
})
describe('Data is encrypted when replicated to peers', async () => {
afterEach(async () => {
if (db1) {
await db1.drop()
await db1.close()
}
if (db2) {
await db2.drop()
await db2.close()
}
})
it('encrypts/decrypts data', async () => {
let connected = false
let updated = false
let error = false
const encryption = {
data: dataEncryption
}
db1 = await orbitdb1.open('encryption-test-1', { encryption })
db2 = await orbitdb2.open(db1.address, { encryption })
const onJoin = async (peerId, heads) => {
connected = true
}
db2.events.on('join', onJoin)
await waitFor(() => connected, () => true)
const onUpdate = async (peerId, heads) => {
updated = true
}
db2.events.on('update', onUpdate)
const onError = async (err) => {
// Catch "Could not decrypt entry" errors
console.log(err)
error = true
}
db2.events.on('error', onError)
const hash1 = await db1.add('record 1')
const hash2 = await db1.add('record 2')
strictEqual(await db1.get(hash1), 'record 1')
strictEqual(await db1.get(hash2), 'record 2')
await waitFor(() => updated || error, () => true)
const all = await db2.all()
strictEqual(all.length, 2)
strictEqual(all[0].value, 'record 1')
strictEqual(all[1].value, 'record 2')
})
it('encrypts/decrypts log', async () => {
let connected = false
let updated = false
let error = false
const encryption = {
replication: replicationEncryption
}
db1 = await orbitdb1.open('encryption-test-1', { encryption })
db2 = await orbitdb2.open(db1.address, { encryption })
const onJoin = async (peerId, heads) => {
connected = true
}
db2.events.on('join', onJoin)
await waitFor(() => connected, () => true)
const onUpdate = async (peerId, heads) => {
updated = true
}
db2.events.on('update', onUpdate)
const onError = async (err) => {
// Catch "Could not decrypt entry" errors
console.log(err)
error = true
}
db2.events.on('error', onError)
const hash1 = await db1.add('record 1')
const hash2 = await db1.add('record 2')
strictEqual(await db1.get(hash1), 'record 1')
strictEqual(await db1.get(hash2), 'record 2')
await waitFor(() => updated || error, () => true)
const all = await db2.all()
strictEqual(all.length, 2)
strictEqual(all[0].value, 'record 1')
strictEqual(all[1].value, 'record 2')
})
it('encrypts/decrypts log and data', async () => {
let connected = false
let updated = false
let error = false
const encryption = {
replication: replicationEncryption,
data: dataEncryption
}
db1 = await orbitdb1.open('encryption-test-1', { encryption })
db2 = await orbitdb2.open(db1.address, { encryption })
const onJoin = async (peerId, heads) => {
connected = true
}
db2.events.on('join', onJoin)
await waitFor(() => connected, () => true)
const onUpdate = async (peerId, heads) => {
updated = true
}
db2.events.on('update', onUpdate)
const onError = async (err) => {
// Catch "Could not decrypt entry" errors
console.log(err)
error = true
}
db2.events.on('error', onError)
const hash1 = await db1.add('record 1')
const hash2 = await db1.add('record 2')
strictEqual(await db1.get(hash1), 'record 1')
strictEqual(await db1.get(hash2), 'record 2')
await waitFor(() => updated || error, () => true)
const all = await db2.all()
strictEqual(all.length, 2)
strictEqual(all[0].value, 'record 1')
strictEqual(all[1].value, 'record 2')
})
it('throws an error if log can\'t be decrypted', async () => {
let connected = false
let hasError = false
let error
const replicationEncryptionWithFailure = await SimpleEncryption({ password: 'goodbye' })
const encryption = {
replication: replicationEncryption
}
const encryptionWithFailure = {
replication: replicationEncryptionWithFailure
}
db1 = await orbitdb1.open('encryption-test-1', { encryption })
db2 = await orbitdb2.open(db1.address, { encryption: encryptionWithFailure })
const onJoin = async (peerId, heads) => {
connected = true
}
db2.events.on('join', onJoin)
await waitFor(() => connected, () => true)
const onError = async (err) => {
// Catch "Could not decrypt entry" errors
error = err
hasError = true
}
db2.events.on('error', onError)
await db1.add('record 1')
await waitFor(() => hasError, () => true)
strictEqual(error.message, 'Could not decrypt entry')
const all = await db2.all()
strictEqual(all.length, 0)
})
it('throws an error if data can\'t be decrypted', async () => {
let connected = false
let hasError = false
let error
const dataEncryptionWithFailure = await SimpleEncryption({ password: 'goodbye' })
const encryption = {
data: dataEncryption
}
const encryptionWithFailure = {
data: dataEncryptionWithFailure
}
db1 = await orbitdb1.open('encryption-test-1', { encryption })
db2 = await orbitdb2.open(db1.address, { encryption: encryptionWithFailure })
const onJoin = async (peerId, heads) => {
connected = true
}
db2.events.on('join', onJoin)
await waitFor(() => connected, () => true)
const onError = async (err) => {
// Catch "Could not decrypt entry" errors
error = err
hasError = true
}
db2.events.on('error', onError)
await db1.add('record 1')
await waitFor(() => hasError, () => true)
strictEqual(error.message, 'Could not decrypt payload')
const all = await db2.all()
strictEqual(all.length, 0)
})
})
describe('Data is encrypted in storage', async () => {
afterEach(async () => {
if (db1) {
await db1.drop()
await db1.close()
}
})
it('payload bytes are encrypted in storage', async () => {
let error
const encryption = {
data: dataEncryption
}
db1 = await orbitdb1.open('encryption-test-1', { encryption })
const onError = async (err) => {
// Catch "Could not decrypt entry" errors
console.log(err)
error = true
}
db1.events.on('error', onError)
const hash1 = await db1.add('record 1')
const bytes = await db1.log.storage.get(hash1)
const { value } = await Block.decode({ bytes, codec, hasher })
const payload = value.payload
strictEqual(payload.constructor, Uint8Array)
try {
await Block.decode({ bytes: payload, codec, hasher })
} catch (e) {
error = e
}
strictEqual(error.message.startsWith('CBOR decode error'), true)
})
it('entry bytes are encrypted in storage', async () => {
let error
const encryption = {
replication: replicationEncryption
}
db1 = await orbitdb1.open('encryption-test-1', { encryption })
const onError = async (err) => {
// Catch "Could not decrypt entry" errors
console.log(err)
error = true
}
db1.events.on('error', onError)
const hash1 = await db1.add('record 1')
let decodedBytes
try {
const bytes = await db1.log.storage.get(hash1)
decodedBytes = await Block.decode({ bytes, codec, hasher })
await Block.decode({ bytes: decodedBytes, codec, hasher })
} catch (e) {
error = e
}
notEqual(error, undefined)
strictEqual(error.message.startsWith('CBOR decode error'), true)
strictEqual(decodedBytes.value.constructor, Uint8Array)
})
})
})

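The new test file above exercises the encryption option added in this release: replication encryption wraps whole log entries, data encryption wraps only payloads, and the two layers can be used alone or combined. A minimal usage sketch distilled from the tests (the orbitdb instance is assumed to be created as in the before() hook):

import SimpleEncryption from '@orbitdb/simple-encryption'

const replication = await SimpleEncryption({ password: 'hello' })
const data = await SimpleEncryption({ password: 'world' })

// Peers opening the database must supply matching passwords; otherwise
// 'error' events fire with 'Could not decrypt entry' or 'Could not decrypt payload'.
const db = await orbitdb.open('my-db', { encryption: { replication, data } })
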
View File

@@ -139,8 +139,7 @@ describe('Sync protocol', function () {
log1 = await Log(testIdentity1, { logId: 'synclog111', entryStorage: entryStorage1 })
log2 = await Log(testIdentity2, { logId: 'synclog111', entryStorage: entryStorage2 })
- const onSynced = async (bytes) => {
- const entry = await Entry.decode(bytes)
+ const onSynced = async (entry) => {
if (await log2.joinEntry(entry)) {
syncedHead = entry
syncedEventFired = true
@@ -207,8 +206,7 @@ describe('Sync protocol', function () {
log1 = await Log(testIdentity1, { logId: 'synclog7', entryStorage: entryStorage1 })
log2 = await Log(testIdentity2, { logId: 'synclog7', entryStorage: entryStorage2 })
- const onSynced = async (bytes) => {
- const entry = await Entry.decode(bytes)
+ const onSynced = async (entry) => {
if (await log2.joinEntry(entry)) {
syncedHead = entry
}
@@ -291,8 +289,8 @@ describe('Sync protocol', function () {
log1 = await Log(testIdentity1, { logId: 'synclog1' })
log2 = await Log(testIdentity2, { logId: 'synclog1' })
- const onSynced = async (bytes) => {
- syncedHead = await Entry.decode(bytes)
+ const onSynced = async (entry) => {
+ syncedHead = entry
syncedEventFired = expectedEntry.hash === syncedHead.hash
}
@@ -348,8 +346,8 @@ describe('Sync protocol', function () {
log1 = await Log(testIdentity1, { logId: 'synclog1' })
log2 = await Log(testIdentity2, { logId: 'synclog1' })
- const onSynced = async (bytes) => {
- syncedHead = await Entry.decode(bytes)
+ const onSynced = async (entry) => {
+ syncedHead = entry
if (expectedEntry) {
syncedEventFired = expectedEntry.hash === syncedHead.hash
}
@@ -434,9 +432,9 @@ describe('Sync protocol', function () {
log1 = await Log(testIdentity1, { logId: 'synclog1' })
log2 = await Log(testIdentity2, { logId: 'synclog1' })
- const onSynced = async (bytes) => {
+ const onSynced = async (entry) => {
if (expectedEntry && !syncedEventFired) {
- syncedHead = await Entry.decode(bytes)
+ syncedHead = entry
syncedEventFired = expectedEntry.hash === syncedHead.hash
}
}
@@ -518,8 +516,8 @@ describe('Sync protocol', function () {
log1 = await Log(testIdentity1, { logId: 'synclog2' })
log2 = await Log(testIdentity2, { logId: 'synclog2' })
- const onSynced = async (bytes) => {
- syncedHead = await Entry.decode(bytes)
+ const onSynced = async (entry) => {
+ syncedHead = entry
if (expectedEntry) {
syncedEventFired = expectedEntry ? expectedEntry.hash === syncedHead.hash : false
}
@@ -665,7 +663,7 @@ describe('Sync protocol', function () {
let sync1, sync2
let log1, log2
- const timeoutTime = 1 // 1 millisecond
+ const timeoutTime = 0 // 0 milliseconds
before(async () => {
[ipfs1, ipfs2] = await Promise.all([createHelia(), createHelia()])
@@ -701,13 +699,13 @@ describe('Sync protocol', function () {
let err = null
const onError = (error) => {
- (!err) && (err = error)
+ err ??= error
}
sync1 = await Sync({ ipfs: ipfs1, log: log1, timeout: timeoutTime })
- sync2 = await Sync({ ipfs: ipfs2, log: log2, start: false, timeout: timeoutTime })
sync1.events.on('error', onError)
+ sync2 = await Sync({ ipfs: ipfs2, log: log2, start: false, timeout: timeoutTime })
sync2.events.on('error', onError)
await log1.append('hello1')
@@ -718,7 +716,7 @@ describe('Sync protocol', function () {
notStrictEqual(err, null)
strictEqual(err.type, 'aborted')
- strictEqual(err.message, 'Read aborted')
+ strictEqual(err.message.includes('aborted'), true)
})
})
})

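Across these Sync hunks, the onSynced callback now receives an already-decoded entry rather than raw bytes, so callers no longer run Entry.decode themselves; the final hunks also loosen the abort-message assertion and reduce the artificial timeout from 1 ms to 0. A sketch of the updated wiring, assuming the Sync and Log setup used in the tests:

const onSynced = async (entry) => {
  // entry arrives decoded (and decrypted, when encryption is configured)
  if (await log.joinEntry(entry)) {
    console.log('synced head:', entry.hash)
  }
}
const sync = await Sync({ ipfs, log, onSynced })
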
View File

@@ -32,7 +32,6 @@ const server = await createLibp2p({
relay: circuitRelayServer({
reservations: {
maxReservations: 5000,
- reservationTtl: 1000,
defaultDataLimit: BigInt(1024 * 1024 * 1024)
}
})
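This hunk drops the 1-second reservationTtl override, presumably so relay reservations stop expiring mid-test, and falls back to the circuit-relay default lifetime. An abridged sketch of the resulting setup (the real script also configures transports, listen addresses and other services):

import { createLibp2p } from 'libp2p'
import { circuitRelayServer } from '@libp2p/circuit-relay-v2'

const server = await createLibp2p({
  // ...transports, addresses, connection encrypters, etc.
  services: {
    relay: circuitRelayServer({
      reservations: {
        maxReservations: 5000, // allow many concurrent test peers
        // reservationTtl removed: use the library's default TTL
        defaultDataLimit: BigInt(1024 * 1024 * 1024) // 1 GiB relayed per connection
      }
    })
  }
})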