Merge pull request #1229 from orbitdb/feat/encryption-2

Feat/encryption 2
Hayden Young 2025-05-26 18:21:06 +02:00 committed by GitHub
commit 0cabfdbae7
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
30 changed files with 2120 additions and 3446 deletions

View File

@@ -18,19 +18,3 @@ jobs:
         run: npm run lint
       - name: Run tests
         run: npm run test:ci
-  test-browser:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-node@v3
-        with:
-          node-version: 20
-          registry-url: https://registry.npmjs.org/
-      - name: Install dependencies
-        run: npm ci
-      - name: Run linter
-        run: npm run lint
-      - name: Run a webrtc relay in the background
-        run: npm run webrtc:background
-      - name: Run browser tests
-        run: npm run test:browser

View File

@@ -109,6 +109,7 @@ You can find more advanced topics in our [docs](https://github.com/orbitdb/orbit
 - [Connecting Peers](https://github.com/orbitdb/orbitdb/blob/main/docs/CONNECTING_PEERS.md)
 - [Replication](https://github.com/orbitdb/orbitdb/blob/main/docs/REPLICATION.md)
 - [Oplog](https://github.com/orbitdb/orbitdb/blob/main/docs/OPLOG.md)
+- [Encryption](https://github.com/orbitdb/orbitdb/blob/main/docs/ENCRYPTION.md)

 ### API

View File

@@ -45,8 +45,10 @@ EventEmitter.defaultMaxListeners = 10000
 let connected = false
 const onJoin = async (peerId) => (connected = true)
+const onError = async (err) => console.error(err)
 db2.events.on('join', onJoin)
+db2.events.on('error', onError)
 await waitFor(() => connected, () => true)

View File

@@ -38,6 +38,8 @@ export default (env, argv) => {
     ],
     fallback: {
       path: require.resolve('path-browserify'),
+      crypto: false,
+      stream: require.resolve('stream-browserify'),
       process: false
     }
   },

94
docs/ENCRYPTION.md Normal file
View File

@@ -0,0 +1,94 @@
# Encryption
OrbitDB features a modular architecture for database encryption. By passing an encryption module to an OrbitDB database, different encryption methods can be employed.
The OrbitDB project currently maintains [SimpleEncryption](https://github.com/orbitdb/simple-encryption), a module that can be used to get started with encrypted databases.
**WARNING:** SimpleEncryption is an unaudited encryption module. Use at your own risk.
## How it works
OrbitDB encrypts databases in two layers: payload encryption and log entry encryption.
Log entry encryption encrypts each log entry in full, while payload encryption encrypts only the value.
This makes it possible to separate the users of a database from its replicators: an OrbitDB peer can replicate a database without being able to decrypt its payloads, i.e. the data.
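For example, a replicating peer can be given the log entry (replication) key without the payload (data) key: it can verify and store entries but not read the values inside them. A minimal sketch of this split, assuming the SimpleEncryption module introduced below and two hypothetical OrbitDB instances `orbitdb1` and `orbitdb2`:
```js
import { SimpleEncryption } from '@orbitdb/simple-encryption'

// Full-access peer: can decrypt both log entries and payloads.
const encryption = {
  replication: await SimpleEncryption({ password: 'replication-password' }),
  data: await SimpleEncryption({ password: 'data-password' })
}
const db = await orbitdb1.open('my-db', { encryption })

// Replicator peer: holds only the replication key, so it can replicate
// and verify entries without being able to decrypt their payloads.
const replicatorEncryption = {
  replication: await SimpleEncryption({ password: 'replication-password' })
}
const replica = await orbitdb2.open(db.address, { encryption: replicatorEncryption })
```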
## Configuring encryption
You can configure OrbitDB to encrypt either the payload data being stored or the entire database.
To ***encrypt payload data only***, specify an encryption module and pass it to OrbitDB via the `encryption` object using the `data` property:
```js
const data = await EncryptionModule()
const encryption = { data }
const db = await orbitdb.open(dbNameOrAddress, { encryption })
```
To ***encrypt the database log entries***, specify an encryption module and pass it to OrbitDB via the `encryption` object using the `replication` property:
```js
const replication = await EncryptionModule()
const encryption = { replication }
const db = await orbitdb.open(dbNameOrAddress, { encryption })
```
To ***encrypt the whole database***, i.e. the payload data and the oplog entries separately, specify an encryption module for each and pass them to OrbitDB via the `encryption` object using both the `replication` and `data` properties:
```js
const replication = await EncryptionModule()
const data = await EncryptionModule()
const encryption = { replication, data }
const db = await orbitdb.open(dbNameOrAddress, { encryption })
```
## Encrypting Databases
OrbitDB provides simple password-based encryption via an external module called [SimpleEncryption](https://github.com/orbitdb/simple-encryption).
**WARNING:** This is an unaudited encryption module. Use at your own risk.
To install SimpleEncryption:
```sh
npm i @orbitdb/simple-encryption
```
To use encryption, instantiate SimpleEncryption with a password and pass it to OrbitDB when opening your database:
```js
import { SimpleEncryption } from '@orbitdb/simple-encryption'
const replication = await SimpleEncryption({ password: 'hello' })
const data = await SimpleEncryption({ password: 'world' })
const encryption = { data, replication }
const db = await orbitdb.open(dbNameOrAddress, { encryption })
```
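Once open, the database is used as usual; entries are encrypted when written and decrypted transparently when read back. A brief sketch, assuming the database opened above is an events database (`db.add`/`db.get` are the events API):
```js
const hash = await db.add('hello encrypted world')
console.log(await db.get(hash)) // 'hello encrypted world', decrypted on read
```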
If you wish to use another encryption type, simply replace SimpleEncryption with the module of your choice.
## Custom Encryption
To implement a custom encryption module for OrbitDB, expose `encrypt` and `decrypt` functions:
```js
const CustomEncryption = async () => {
  const encrypt = (value) => {
    // return encrypted value
  }

  const decrypt = (value) => {
    // return decrypted value
  }

  return {
    encrypt,
    decrypt
  }
}

export default CustomEncryption
```
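The stub above could be filled in, for illustration only (unaudited, and assuming a Node.js environment with a pre-shared 32-byte key; key management is out of scope here), with AES-256-GCM from Node's built-in crypto module:
```js
import { randomBytes, createCipheriv, createDecipheriv } from 'node:crypto'

// key: an assumed pre-shared 32-byte secret (e.g. randomBytes(32))
const AesGcmEncryption = async ({ key }) => {
  const encrypt = async (value) => {
    // value is the encoded entry or payload bytes
    const iv = randomBytes(12)
    const cipher = createCipheriv('aes-256-gcm', key, iv)
    const ciphertext = Buffer.concat([cipher.update(value), cipher.final()])
    // prepend the iv and auth tag so decrypt() can recover them
    return Buffer.concat([iv, cipher.getAuthTag(), ciphertext])
  }

  const decrypt = async (value) => {
    const iv = value.subarray(0, 12)
    const tag = value.subarray(12, 28)
    const ciphertext = value.subarray(28)
    const decipher = createDecipheriv('aes-256-gcm', key, iv)
    decipher.setAuthTag(tag)
    return Buffer.concat([decipher.update(ciphertext), decipher.final()])
  }

  return { encrypt, decrypt }
}

export default AesGcmEncryption
```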

View File

@@ -307,3 +307,5 @@ These kinds of connectivity configurations are beyond the scope of OrbitDB. To f
 [Databases](./DATABASES.md) covers database management and data entry in more detail.

 [Replication](./REPLICATION.md) provides a comprehensive overview of how to perform data replication across multiple peers.
+
+[Encryption](./ENCRYPTION.md) discusses database encryption using OrbitDB's modular architecture.

4365
package-lock.json generated

File diff suppressed because it is too large

View File

@@ -1,6 +1,6 @@
 {
   "name": "@orbitdb/core",
-  "version": "2.5.0",
+  "version": "3.0.0",
   "description": "Distributed p2p database on IPFS",
   "author": "Haad",
   "license": "MIT",
@@ -31,16 +31,17 @@
   "devDependencies": {
     "@chainsafe/libp2p-gossipsub": "^14.1.0",
     "@libp2p/circuit-relay-v2": "^3.1.0",
+    "@orbitdb/simple-encryption": "^0.0.1",
     "blockstore-level": "^2.0.1",
     "c8": "^8.0.1",
     "cross-env": "^7.0.3",
     "fs-extra": "^11.2.0",
-    "helia": "^5.2.1",
+    "helia": "^5.3.0",
     "it-all": "^3.0.4",
     "jsdoc": "^4.0.2",
     "mocha": "^10.2.0",
     "path-browserify": "^1.0.1",
-    "playwright-test": "^14.1.6",
+    "playwright-test": "^14.1.9",
     "rimraf": "^5.0.5",
     "standard": "^17.1.0",
     "webpack": "^5.89.0",

View File

@@ -7,7 +7,7 @@
 import { EventEmitter } from 'events'
 import PQueue from 'p-queue'
 import Sync from './sync.js'
-import { Log, Entry } from './oplog/index.js'
+import { Log } from './oplog/index.js'
 import { ComposedStorage, LRUStorage, IPFSBlockStorage, LevelStorage } from './storage/index.js'
 import pathJoin from './utils/path-join.js'
@@ -39,10 +39,12 @@ const defaultCacheSize = 1000
  * automatically. Otherwise, false.
  * @param {function} [params.onUpdate] A function callback. Fired when an
  * entry is added to the oplog.
+ * @param {Function} options.encryptFn An encryption function.
+ * @param {Function} options.decryptFn A decryption function.
  * @return {module:Databases~Database} An instance of Database.
  * @instance
  */
-const Database = async ({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate }) => {
+const Database = async ({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate, encryption }) => {
 /**
  * @namespace module:Databases~Database
  * @description The instance returned by {@link module:Database~Database}.
@@ -108,7 +110,9 @@ const Database = async ({ ipfs, identity, address, name, access, directory, meta
     await LevelStorage({ path: pathJoin(directory, '/log/_index/') })
   )

-  const log = await Log(identity, { logId: address, access, entryStorage, headsStorage, indexStorage })
+  encryption = encryption || {}
+
+  const log = await Log(identity, { logId: address, access, entryStorage, headsStorage, indexStorage, encryption })

   const events = new EventEmitter()
@@ -134,13 +138,12 @@ const Database = async ({ ipfs, identity, address, name, access, directory, meta
       return entry.hash
     }
     const hash = await queue.add(task)
-    await queue.onIdle()
     return hash
   }

-  const applyOperation = async (bytes) => {
+  const applyOperation = async (entry) => {
     const task = async () => {
-      const entry = await Entry.decode(bytes)
+      try {
         if (entry) {
           const updated = await log.joinEntry(entry)
           if (updated) {
@@ -150,6 +153,9 @@ const Database = async ({ ipfs, identity, address, name, access, directory, meta
             events.emit('update', entry)
           }
         }
+      } catch (e) {
+        console.error(e)
+      }
     }
     await queue.add(task)
   }
@@ -177,7 +183,7 @@ const Database = async ({ ipfs, identity, address, name, access, directory, meta
   * @async
   */
  const drop = async () => {
-    await queue.onIdle()
+    await queue.clear()
    await log.clear()
    if (access && access.drop) {
      await access.drop()

View File

@@ -25,8 +25,8 @@ const DefaultOptions = { indexBy: '_id' }
  * @return {module:Databases.Databases-Documents} A Documents function.
  * @memberof module:Databases
  */
-const Documents = ({ indexBy } = DefaultOptions) => async ({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate }) => {
-  const database = await Database({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically })
+const Documents = ({ indexBy } = DefaultOptions) => async ({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate, encrypt }) => {
+  const database = await Database({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, encrypt })

   const { addOperation, log } = database

View File

@@ -15,8 +15,8 @@ const type = 'events'
  * @return {module:Databases.Databases-Events} A Events function.
  * @memberof module:Databases
  */
-const Events = () => async ({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate }) => {
-  const database = await Database({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate })
+const Events = () => async ({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate, encryption }) => {
+  const database = await Database({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate, encryption })

   const { addOperation, log } = database

View File

@@ -109,7 +109,7 @@ const Index = ({ directory } = {}) => async () => {
  * function.
  * @memberof module:Databases
  */
-const KeyValueIndexed = () => async ({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate }) => {
+const KeyValueIndexed = () => async ({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate, encryption }) => {
   // Set up the directory for an index
   directory = pathJoin(directory || './orbitdb', `./${address}/_index/`)
@@ -117,7 +117,7 @@ const KeyValueIndexed = () => async ({ ipfs, identity, address, name, access, di
   const index = await Index({ directory })()

   // Set up the underlying KeyValue database
-  const keyValueStore = await KeyValue()({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate: index.update })
+  const keyValueStore = await KeyValue()({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate: index.update, encryption })

   /**
    * Gets a value from the store by key.

View File

@@ -15,8 +15,8 @@ const type = 'keyvalue'
  * @return {module:Databases.Databases-KeyValue} A KeyValue function.
  * @memberof module:Databases
  */
-const KeyValue = () => async ({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate }) => {
-  const database = await Database({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate })
+const KeyValue = () => async ({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate, encryption }) => {
+  const database = await Database({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate, encryption })

   const { addOperation, log } = database

View File

@@ -55,7 +55,7 @@ const hashStringEncoding = base58btc
  * // { payload: "hello", next: [], ... }
  * @private
  */
-const create = async (identity, id, payload, clock = null, next = [], refs = []) => {
+const create = async (identity, id, payload, encryptPayloadFn, clock = null, next = [], refs = []) => {
   if (identity == null) throw new Error('Identity is required, cannot create entry')
   if (id == null) throw new Error('Entry requires an id')
   if (payload == null) throw new Error('Entry requires a payload')
@@ -63,9 +63,16 @@ const create = async (identity, id, payload, clock = null, next = [], refs = [])
   clock = clock || Clock(identity.publicKey)

+  let encryptedPayload
+  if (encryptPayloadFn) {
+    const { bytes: encodedPayloadBytes } = await Block.encode({ value: payload, codec, hasher })
+    encryptedPayload = await encryptPayloadFn(encodedPayloadBytes)
+  }
+
   const entry = {
     id, // For determining a unique chain
-    payload, // Can be any dag-cbor encodeable data
+    payload: encryptedPayload || payload, // Can be any dag-cbor encodeable data
     next, // Array of strings of CIDs
     refs, // Array of strings of CIDs
     clock, // Clock
@@ -78,8 +85,13 @@ const create = async (identity, id, payload, clock = null, next = [], refs = [])
   entry.key = identity.publicKey
   entry.identity = identity.hash
   entry.sig = signature
-  return encode(entry)
+  entry.payload = payload
+
+  if (encryptPayloadFn) {
+    entry._payload = encryptedPayload
+  }
+
+  return entry
 }

 /**
@@ -97,13 +109,15 @@ const verify = async (identities, entry) => {
   if (!entry.key) throw new Error("Entry doesn't have a key")
   if (!entry.sig) throw new Error("Entry doesn't have a signature")

+  const e = Object.assign({}, entry)
+
   const value = {
-    id: entry.id,
-    payload: entry.payload,
-    next: entry.next,
-    refs: entry.refs,
-    clock: entry.clock,
-    v: entry.v
+    id: e.id,
+    payload: e._payload || e.payload,
+    next: e.next,
+    refs: e.refs,
+    clock: e.clock,
+    v: e.v
   }

   const { bytes } = await Block.encode({ value, codec, hasher })
@@ -136,7 +150,7 @@ const isEntry = (obj) => {
  * @private
  */
 const isEqual = (a, b) => {
-  return a && b && a.hash === b.hash
+  return a && b && a.hash && a.hash === b.hash
 }

 /**
@@ -146,13 +160,39 @@ const isEqual = (a, b) => {
  * @memberof module:Log~Entry
  * @private
  */
-const decode = async (bytes) => {
-  const { cid, value } = await Block.decode({ bytes, codec, hasher })
+const decode = async (bytes, decryptEntryFn, decryptPayloadFn) => {
+  let cid
+
+  if (decryptEntryFn) {
+    try {
+      const encryptedEntry = await Block.decode({ bytes, codec, hasher })
+      bytes = await decryptEntryFn(encryptedEntry.value)
+      cid = encryptedEntry.cid
+    } catch (e) {
+      throw new Error('Could not decrypt entry')
+    }
+  }
+
+  const decodedEntry = await Block.decode({ bytes, codec, hasher })
+  const entry = decodedEntry.value
+
+  if (decryptPayloadFn) {
+    try {
+      const decryptedPayloadBytes = await decryptPayloadFn(entry.payload)
+      const { value: decryptedPayload } = await Block.decode({ bytes: decryptedPayloadBytes, codec, hasher })
+      entry._payload = entry.payload
+      entry.payload = decryptedPayload
+    } catch (e) {
+      throw new Error('Could not decrypt payload')
+    }
+  }
+
+  cid = cid || decodedEntry.cid
   const hash = cid.toString(hashStringEncoding)

   return {
-    ...value,
-    hash,
-    bytes
+    ...entry,
+    hash
   }
 }

@@ -163,13 +203,28 @@ const decode = async (bytes) => {
  * @memberof module:Log~Entry
  * @private
  */
-const encode = async (entry) => {
-  const { cid, bytes } = await Block.encode({ value: entry, codec, hasher })
+const encode = async (entry, encryptEntryFn, encryptPayloadFn) => {
+  const e = Object.assign({}, entry)
+
+  if (encryptPayloadFn) {
+    e.payload = e._payload
+  }
+
+  delete e._payload
+  delete e.hash
+
+  let { cid, bytes } = await Block.encode({ value: e, codec, hasher })
+
+  if (encryptEntryFn) {
+    bytes = await encryptEntryFn(bytes)
+    const encryptedEntry = await Block.encode({ value: bytes, codec, hasher })
+    cid = encryptedEntry.cid
+    bytes = encryptedEntry.bytes
+  }
+
   const hash = cid.toString(hashStringEncoding)
+  const clock = Clock(entry.clock.id, entry.clock.time)

   return {
+    ...entry,
+    clock,
     hash,
     bytes
   }

View File

@@ -9,19 +9,17 @@ import MemoryStorage from '../storage/memory.js'
 const DefaultStorage = MemoryStorage

-const Heads = async ({ storage, heads }) => {
+const Heads = async ({ storage, heads, decryptPayloadFn, decryptEntryFn }) => {
   storage = storage || await DefaultStorage()
+  const encoder = new TextEncoder()
+  const decoder = new TextDecoder()

   const put = async (heads) => {
     heads = findHeads(heads)
-    for (const head of heads) {
-      await storage.put(head.hash, head.bytes)
-    }
-  }
-
-  const set = async (heads) => {
-    await storage.clear()
-    await put(heads)
+    const newHeads = heads.map(e => ({ hash: e.hash, next: e.next }))
+    const bytes = encoder.encode(JSON.stringify(newHeads))
+    await storage.put('heads', bytes)
   }

   const add = async (head) => {
@@ -30,22 +28,21 @@ const Heads = async ({ storage, heads }) => {
       return
     }
     const newHeads = findHeads([...currentHeads, head])
-    await set(newHeads)
+    await put(newHeads)
     return newHeads
   }

   const remove = async (hash) => {
     const currentHeads = await all()
     const newHeads = currentHeads.filter(e => e.hash !== hash)
-    await set(newHeads)
+    await put(newHeads)
   }

   const iterator = async function * () {
-    const it = storage.iterator()
-    for await (const [, bytes] of it) {
-      const head = await Entry.decode(bytes)
-      yield head
+    const bytes = await storage.get('heads')
+    const headHashes = bytes ? JSON.parse(decoder.decode(bytes)) : []
+    for (const hash of headHashes) {
+      yield hash
     }
   }

@@ -66,11 +63,13 @@ const Heads = async ({ storage, heads }) => {
   }

   // Initialize the heads if given as parameter
-  await put(heads || [])
+  if (heads) {
+    await put(heads)
+  }

   return {
     put,
-    set,
+    set: put,
     add,
     remove,
     iterator,

View File

@@ -10,18 +10,14 @@ import LRU from 'lru'
 import PQueue from 'p-queue'
 import Entry from './entry.js'
 import Clock, { tickClock } from './clock.js'
-import Heads from './heads.js'
 import ConflictResolution from './conflict-resolution.js'
-import MemoryStorage from '../storage/memory.js'
+import OplogStore from './oplog-store.js'

 const { LastWriteWins, NoZeroes } = ConflictResolution

 const randomId = () => new Date().getTime().toString()
 const maxClockTimeReducer = (res, acc) => Math.max(res, acc.clock.time)

-// Default storage for storing the Log and its entries. Default: Memory. Options: Memory, LRU, IPFS.
-const DefaultStorage = MemoryStorage
-
 // Default AccessController for the Log.
 // Default policy is that anyone can write to the Log.
 // Signature of an entry will always be verified regardless of AccessController policy.
@@ -56,7 +52,7 @@ const DefaultAccessController = async () => {
  * @memberof module:Log
  * @instance
  */
-const Log = async (identity, { logId, logHeads, access, entryStorage, headsStorage, indexStorage, sortFn } = {}) => {
+const Log = async (identity, { logId, logHeads, access, entryStorage, headsStorage, indexStorage, sortFn, encryption } = {}) => {
 /**
  * @namespace Log
  * @description The instance returned by {@link module:Log}
@@ -68,20 +64,23 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
   if (logHeads != null && !Array.isArray(logHeads)) {
     throw new Error('\'logHeads\' argument must be an array')
   }

   // Set Log's id
   const id = logId || randomId()

+  // Encryption of entries and payloads
+  encryption = encryption || {}
+  const encryptPayloadFn = encryption.data?.encrypt
+
   // Access Controller
   access = access || await DefaultAccessController()
-  // Oplog entry storage
-  const _entries = entryStorage || await DefaultStorage()
-  // Entry index for keeping track which entries are already in the log
-  const _index = indexStorage || await DefaultStorage()
-  // Heads storage
-  headsStorage = headsStorage || await DefaultStorage()
-  // Add heads to the state storage, ie. init the log state
-  const _heads = await Heads({ storage: headsStorage, heads: logHeads })
+
+  // Index and storage of entries for this Log
+  const oplogStore = await OplogStore({ logHeads, entryStorage, indexStorage, headsStorage, encryption })
+
   // Conflict-resolution sorting function
   sortFn = NoZeroes(sortFn || LastWriteWins)

   // Internal queues for processing appends and joins in their call-order
   const appendQueue = new PQueue({ concurrency: 1 })
   const joinQueue = new PQueue({ concurrency: 1 })
@@ -106,8 +105,8 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
   * @instance
   */
  const heads = async () => {
-    const res = await _heads.all()
-    return res.sort(sortFn).reverse()
+    const heads_ = await oplogStore.heads()
+    return heads_.sort(sortFn).reverse()
  }

 /**
@@ -134,16 +133,14 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
   * @instance
   */
  const get = async (hash) => {
-    const bytes = await _entries.get(hash)
-    if (bytes) {
-      const entry = await Entry.decode(bytes)
-      return entry
+    if (!hash) {
+      throw new Error('hash is required')
     }
+    return oplogStore.get(hash)
  }

  const has = async (hash) => {
-    const entry = await _index.get(hash)
-    return entry != null
+    return oplogStore.has(hash)
  }

 /**
@@ -162,6 +159,7 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
     // 2. Authorize entry
     // 3. Store entry
     // 4. return Entry
+
     // Get current heads of the log
     const heads_ = await heads()
     // Create the next pointers from heads
@@ -169,29 +167,29 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
     // Get references (pointers) to multiple entries in the past
     // (skips the heads which are covered by the next field)
     const refs = await getReferences(heads_, options.referencesCount + heads_.length)

     // Create the entry
     const entry = await Entry.create(
       identity,
       id,
       data,
+      encryptPayloadFn,
       tickClock(await clock()),
       nexts,
       refs
     )

     // Authorize the entry
     const canAppend = await access.canAppend(entry)
     if (!canAppend) {
       throw new Error(`Could not append entry:\nKey "${identity.hash}" is not allowed to write to the log`)
     }

-    // The appended entry is now the latest head
-    await _heads.set([entry])
-    // Add entry to the entry storage
-    await _entries.put(entry.hash, entry.bytes)
-    // Add entry to the entry index
-    await _index.put(entry.hash, true)
+    // Add the entry to the oplog store (=store and index it)
+    const hash = await oplogStore.setHead(entry)

     // Return the appended entry
-    return entry
+    return { ...entry, hash }
   }

   return appendQueue.add(task)
@@ -218,9 +216,7 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
     if (!isLog(log)) {
       throw new Error('Given argument is not an instance of Log')
     }
-    if (_entries.merge) {
-      await _entries.merge(log.storage)
-    }
+    await oplogStore.storage.merge(log.storage)
     const heads = await log.heads()
     for (const entry of heads) {
       await joinEntry(entry)
@@ -302,21 +298,12 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
     await traverseAndVerify()

-    /* 4. Add missing entries to the index (=to the log) */
-    for (const hash of hashesToAdd.values()) {
-      await _index.put(hash, true)
-    }
-
-    /* 5. Remove heads which new entries are connect to */
-    for (const hash of connectedHeads.values()) {
-      await _heads.remove(hash)
-    }
-
-    /* 6. Add new entry to entries (for pinning) */
-    await _entries.put(entry.hash, entry.bytes)
-
-    /* 6. Add the new entry to heads (=union with current heads) */
-    await _heads.add(entry)
+    /* 4. Add missing entries to the oplog store (=to the log) */
+    await oplogStore.addVerified(hashesToAdd.values())
+    /* 6. Remove heads which new entries are connect to */
+    await oplogStore.removeHeads(connectedHeads.values())
+    /* 7. Add the new entry to heads (=union with current heads) */
+    await oplogStore.addHead(entry)

     return true
   }
@@ -501,9 +488,9 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
   * @instance
   */
  const clear = async () => {
-    await _index.clear()
-    await _heads.clear()
-    await _entries.clear()
+    await appendQueue.clear()
+    await joinQueue.clear()
+    await oplogStore.clear()
  }

 /**
@@ -512,9 +499,9 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
   * @instance
   */
  const close = async () => {
-    await _index.close()
-    await _heads.close()
-    await _entries.close()
+    await appendQueue.onIdle()
+    await joinQueue.onIdle()
+    await oplogStore.close()
  }

 /**
@@ -570,7 +557,8 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
    close,
    access,
    identity,
-    storage: _entries
+    storage: oplogStore.storage,
+    encryption
  }
 }

112
src/oplog/oplog-store.js Normal file
View File

@@ -0,0 +1,112 @@
import Entry from './entry.js'
import Heads from './heads.js'
import MemoryStorage from '../storage/memory.js'
// Default storage for storing the Log and its entries. Default: Memory. Options: Memory, LRU, IPFS.
const DefaultStorage = MemoryStorage
const OplogStore = async ({ logHeads, entryStorage, headsStorage, indexStorage, encryption }) => {
// Setup encryption and decryption functions
const encryptEntryFn = encryption?.replication?.encrypt
const decryptEntryFn = encryption?.replication?.decrypt
const encryptPayloadFn = encryption?.data?.encrypt
const decryptPayloadFn = encryption?.data?.decrypt
// Oplog entry storage
const _entries = entryStorage || await DefaultStorage()
// Entry index for keeping track which entries are already in the log
const _index = indexStorage || await DefaultStorage()
// Heads storage
headsStorage = headsStorage || await DefaultStorage()
// Add heads to the state storage, ie. init the log state
const _heads = await Heads({ storage: headsStorage, heads: logHeads, decryptPayloadFn, decryptEntryFn })
const get = async (hash) => {
const bytes = await _entries.get(hash)
if (bytes) {
const entry = await Entry.decode(bytes, decryptEntryFn, decryptPayloadFn)
return entry
}
}
const getBytes = async (hash) => {
return _entries.get(hash)
}
const has = async (hash) => {
const entry = await _index.get(hash)
return entry != null
}
const heads = async () => {
const heads_ = []
for (const { hash } of await _heads.all()) {
const head = await get(hash)
heads_.push(head)
}
return heads_
}
const setHead = async (entry) => {
const { hash, bytes } = await Entry.encode(entry, encryptEntryFn, encryptPayloadFn)
// Add entry to the entry storage
await _entries.put(hash, bytes)
// Add entry to the entry index
await _index.put(hash, true)
// The appended entry is now the latest head
await _heads.set([{ hash, next: entry.next }])
return hash
}
const addHead = async (entry) => {
/* 7. Add the new entry to heads (=union with current heads) */
await _heads.add(entry)
return entry.hash
}
const removeHeads = async (hashes) => {
/* 5. Remove heads which new entries are connect to */
for (const hash of hashes) {
await _heads.remove(hash)
}
}
const addVerified = async (hashes) => {
/* 4. Add missing entries to the index (=to the log) */
for (const hash of hashes) {
await _index.put(hash, true)
/* 5. Add new entry to entries (for pinning) */
if (_entries.persist) {
await _entries.persist(hash)
}
}
}
const clear = async () => {
await _index.clear()
await _heads.clear()
await _entries.clear()
}
const close = async () => {
await _index.close()
await _heads.close()
await _entries.close()
}
return {
get,
getBytes,
has,
heads,
setHead,
addHead,
removeHeads,
addVerified,
storage: _entries,
clear,
close
}
}
export default OplogStore

View File

@@ -104,6 +104,7 @@ const OrbitDB = async ({ ipfs, id, identity, identities, directory } = {}) => {
  * @param {module:Storage} [params.indexStorage=[ComposedStorage]{@link module:Storage.Storage-Composed}] A compatible storage instance for storing an " index of log entries. Defaults to ComposedStorage(LRUStorage, LevelStorage).
  * @param {number} [params.referencesCount] The number of references to
  * use for [Log]{@link module:Log} entries.
+ * @param {number} [params.encryption] Encryption module to encrypt/decrypt database payloads and entries. If provided, the encryption object must take the form { replication: { encrypt, decrypt }, data: { encrypt, decrypt } }.
  * @memberof module:OrbitDB
  * @return {module:Database} A database instance.
  * @throws "Unsupported database type" if the type specified is not in the list
@@ -112,7 +113,7 @@ const OrbitDB = async ({ ipfs, id, identity, identities, directory } = {}) => {
  * @instance
  * @async
  */
-const open = async (address, { type, meta, sync, Database, AccessController, headsStorage, entryStorage, indexStorage, referencesCount } = {}) => {
+const open = async (address, { type, meta, sync, Database, AccessController, headsStorage, entryStorage, indexStorage, referencesCount, encryption } = {}) => {
   let name, manifest, accessController

   if (databases[address]) {
@@ -153,7 +154,7 @@ const OrbitDB = async ({ ipfs, id, identity, identities, directory } = {}) => {
   address = address.toString()

-  const db = await Database({ ipfs, identity, address, name, access: accessController, directory, meta, syncAutomatically: sync, headsStorage, entryStorage, indexStorage, referencesCount })
+  const db = await Database({ ipfs, identity, address, name, access: accessController, directory, meta, syncAutomatically: sync, headsStorage, entryStorage, indexStorage, referencesCount, encryption })

   db.events.on('close', onDatabaseClosed(address))

View File

@@ -114,6 +114,16 @@ const ComposedStorage = async (storage1, storage2) => {
     await storage2.clear()
   }

+  const persist = async (hash) => {
+    if (storage1.persist) {
+      await storage1.persist(hash)
+    }
+    if (storage2.persist) {
+      await storage2.persist(hash)
+    }
+  }
+
   /**
    * Calls close on each of the composed storages.
    * @function
@@ -129,6 +139,7 @@ const ComposedStorage = async (storage1, storage2) => {
     put,
     get,
     del,
+    persist,
     iterator,
     merge,
     clear,

View File

@@ -28,6 +28,8 @@ const DefaultTimeout = 30000 // 30 seconds
 const IPFSBlockStorage = async ({ ipfs, pin, timeout } = {}) => {
   if (!ipfs) throw new Error('An instance of ipfs is required.')

+  const timeoutControllers = new Set()
+
   /**
    * Puts data to an IPFS block.
    * @function
@@ -41,9 +43,7 @@ const IPFSBlockStorage = async ({ ipfs, pin, timeout } = {}) => {
     const { signal } = new TimeoutController(timeout || DefaultTimeout)
     await ipfs.blockstore.put(cid, data, { signal })

-    if (pin && !(await ipfs.pins.isPinned(cid))) {
-      await drain(ipfs.pins.add(cid))
-    }
+    await persist(hash)
   }

   const del = async (hash) => {}
@@ -58,25 +58,40 @@ const IPFSBlockStorage = async ({ ipfs, pin, timeout } = {}) => {
    */
   const get = async (hash) => {
     const cid = CID.parse(hash, base58btc)
-    const { signal } = new TimeoutController(timeout || DefaultTimeout)
-    const block = await ipfs.blockstore.get(cid, { signal })
+    const controller = new TimeoutController(timeout || DefaultTimeout)
+    timeoutControllers.add(controller)
+    const block = await ipfs.blockstore.get(cid, { signal: controller.signal })
+    timeoutControllers.delete(controller)
     if (block) {
       return block
     }
   }

+  const persist = async (hash) => {
+    const cid = CID.parse(hash, base58btc)
+    if (pin && !(await ipfs.pins.isPinned(cid))) {
+      await drain(ipfs.pins.add(cid))
+    }
+  }
+
   const iterator = async function * () {}

   const merge = async (other) => {}

   const clear = async () => {}

-  const close = async () => {}
+  const close = async () => {
+    for (const controller in timeoutControllers) {
+      controller.abort()
+    }
+    timeoutControllers.clear()
+  }

   return {
     put,
     del,
     get,
+    persist,
     iterator,
     merge,
     clear,

View File

@@ -3,6 +3,7 @@ import PQueue from 'p-queue'
 import { EventEmitter } from 'events'
 import { TimeoutController } from 'timeout-abort-controller'
 import pathJoin from './utils/path-join.js'
+import { Entry } from './oplog/index.js'

 const DefaultTimeout = 30000 // 30 seconds
@@ -134,7 +135,7 @@ const Sync = async ({ ipfs, log, events, onSynced, start, timeout }) => {
   */
  events = events || new EventEmitter()
-  timeout = timeout || DefaultTimeout
+  timeout ??= DefaultTimeout

  let started = false
@@ -146,7 +147,8 @@ const Sync = async ({ ipfs, log, events, onSynced, start, timeout }) => {
   const sendHeads = (source) => {
     return (async function * () {
       const heads = await log.heads()
-      for await (const { bytes } of heads) {
+      for await (const { hash } of heads) {
+        const bytes = await log.storage.get(hash)
         yield bytes
       }
     })()
@@ -156,7 +158,8 @@ const Sync = async ({ ipfs, log, events, onSynced, start, timeout }) => {
     for await (const value of source) {
       const headBytes = value.subarray()
       if (headBytes && onSynced) {
-        await onSynced(headBytes)
+        const entry = await Entry.decode(headBytes, log.encryption.replication?.decrypt, log.encryption.data?.decrypt)
+        await onSynced(entry)
       }
     }
     if (started) {
@@ -219,7 +222,8 @@ const Sync = async ({ ipfs, log, events, onSynced, start, timeout }) => {
     const task = async () => {
       try {
         if (data && onSynced) {
-          await onSynced(data)
+          const entry = await Entry.decode(data, log.encryption.replication?.decrypt, log.encryption.data?.decrypt)
+          await onSynced(entry)
         }
       } catch (e) {
         events.emit('error', e)
@@ -243,8 +247,9 @@ const Sync = async ({ ipfs, log, events, onSynced, start, timeout }) => {
   * @instance
   */
  const add = async (entry) => {
-    if (started) {
-      await pubsub.publish(address, entry.bytes)
+    if (started && entry && entry.hash) {
+      const bytes = await log.storage.get(entry.hash)
+      await pubsub.publish(address, bytes)
     }
  }
@@ -257,7 +262,7 @@ const Sync = async ({ ipfs, log, events, onSynced, start, timeout }) => {
  const stopSync = async () => {
    if (started) {
      started = false
-      await queue.onIdle()
+      await queue.clear()
      pubsub.removeEventListener('subscription-change', handlePeerSubscribed)
      pubsub.removeEventListener('message', handleUpdateMessage)
      await libp2p.unhandle(headsSyncAddress)
@@ -275,12 +280,12 @@ const Sync = async ({ ipfs, log, events, onSynced, start, timeout }) => {
   */
  const startSync = async () => {
    if (!started) {
-      // Exchange head entries with peers when connected
-      await libp2p.handle(headsSyncAddress, handleReceiveHeads)
      pubsub.addEventListener('subscription-change', handlePeerSubscribed)
      pubsub.addEventListener('message', handleUpdateMessage)
      // Subscribe to the pubsub channel for this database through which updates are sent
      await pubsub.subscribe(address)
+      // Exchange head entries with peers when connected
+      await libp2p.handle(headsSyncAddress, handleReceiveHeads)
      // Remove disconnected peers from `peers`, as otherwise they will not resync heads on reconnection
      libp2p.addEventListener('peer:disconnect', handlePeerDisconnected)
      started = true

View File

@@ -196,27 +196,20 @@ describe('Database - Replication', function () {
     db1 = await Database({ ipfs: ipfs1, identity: testIdentity1, address: databaseId, accessController, directory: './orbitdb1', entryStorage: storage1 })
     db2 = await Database({ ipfs: ipfs2, identity: testIdentity2, address: databaseId, accessController, directory: './orbitdb2', entryStorage: storage2 })

-    let connected1 = false
-    let connected2 = false
+    let connected = false

-    const onConnected1 = (peerId, heads) => {
-      connected1 = true
-    }
-
-    const onConnected2 = (peerId, heads) => {
-      connected2 = true
-    }
-
-    db1.events.on('join', onConnected1)
-    db2.events.on('join', onConnected2)
+    const onConnected = (peerId, heads) => {
+      connected = true
+    }
+
+    db2.events.on('join', onConnected)

     await db1.addOperation({ op: 'PUT', key: 1, value: 'record 1 on db 1' })
     await db1.addOperation({ op: 'PUT', key: 2, value: 'record 2 on db 1' })
     await db1.addOperation({ op: 'PUT', key: 3, value: 'record 3 on db 1' })
     await db1.addOperation({ op: 'PUT', key: 4, value: 'record 4 on db 1' })

-    await waitFor(() => connected1, () => true)
-    await waitFor(() => connected2, () => true)
+    await waitFor(() => connected, () => true)

     const all1 = []
     for await (const item of db1.log.iterator()) {

View File

@@ -3,7 +3,7 @@ import { rimraf } from 'rimraf'
 import { existsSync } from 'fs'
 import { copy } from 'fs-extra'
 import Path from 'path'
-import { Database, Entry, KeyStore, Identities } from '../src/index.js'
+import { Database, KeyStore, Identities } from '../src/index.js'
 import LevelStorage from '../src/storage/level.js'
 import MemoryStorage from '../src/storage/memory.js'
 import testKeysPath from './fixtures/test-keys-path.js'
@@ -68,8 +68,13 @@ describe('Database', function () {
   describe('Options', () => {
     it('uses default directory for headsStorage', async () => {
       db = await Database({ ipfs, identity: testIdentity, address: databaseId, accessController })
-      const op = { op: 'PUT', key: 1, value: 'record 1 on db 1' }
-      const hash = await db.addOperation(op)
+      const op1 = { op: 'PUT', key: 1, value: 'record 1 on db 1 version 1' }
+      const op2 = { op: 'PUT', key: 1, value: 'record 1 on db 1 version 2' }
+
+      await db.addOperation(op1)
+      const hash = await db.addOperation(op2)
+      const entry = await db.log.get(hash)

       const headsPath = Path.join('./orbitdb/', `${databaseId}/`, '/log/_heads/')
@@ -78,8 +83,13 @@ describe('Database', function () {
       await db.close()

       const headsStorage = await LevelStorage({ path: headsPath })
+      const bytes = await headsStorage.get('heads')
+      const heads = JSON.parse(new TextDecoder().decode(bytes))

-      deepStrictEqual((await Entry.decode(await headsStorage.get(hash))).payload, op)
+      strictEqual(heads.length, 1)
+      strictEqual(heads.at(0).hash, hash)
+      strictEqual(heads.at(0).next.length, 1)
+      strictEqual(heads.at(0).next.at(0), entry.next.at(0))

       await headsStorage.close()
@@ -88,8 +98,12 @@ describe('Database', function () {
     it('uses given directory for headsStorage', async () => {
       db = await Database({ ipfs, identity: testIdentity, address: databaseId, accessController, directory: './custom-directory' })
-      const op = { op: 'PUT', key: 1, value: 'record 1 on db 1' }
-      const hash = await db.addOperation(op)
+      const op1 = { op: 'PUT', key: 1, value: 'record 1 on db 1 version 1' }
+      const op2 = { op: 'PUT', key: 1, value: 'record 1 on db 1 version 2' }
+
+      await db.addOperation(op1)
+      const hash = await db.addOperation(op2)
+      const entry = await db.log.get(hash)

       const headsPath = Path.join('./custom-directory/', `${databaseId}/`, '/log/_heads/')
@@ -99,7 +113,13 @@ describe('Database', function () {
       const headsStorage = await LevelStorage({ path: headsPath })

-      deepStrictEqual((await Entry.decode(await headsStorage.get(hash))).payload, op)
+      const bytes = await headsStorage.get('heads')
+      const heads = JSON.parse(new TextDecoder().decode(bytes))
+
+      strictEqual(heads.length, 1)
+      strictEqual(heads.at(0).hash, hash)
+      strictEqual(heads.at(0).next.length, 1)
+      strictEqual(heads.at(0).next.at(0), entry.next.at(0))

       await headsStorage.close()
@@ -110,23 +130,51 @@ describe('Database', function () {
     it('uses given MemoryStorage for headsStorage', async () => {
       const headsStorage = await MemoryStorage()
       db = await Database({ ipfs, identity: testIdentity, address: databaseId, accessController, directory: './orbitdb', headsStorage })
-      const op = { op: 'PUT', key: 1, value: 'record 1 on db 1' }
-      const hash = await db.addOperation(op)
+      const op1 = { op: 'PUT', key: 1, value: 'record 1 on db 1 version 1' }
+      const op2 = { op: 'PUT', key: 1, value: 'record 1 on db 1 version 2' }

-      deepStrictEqual((await Entry.decode(await headsStorage.get(hash))).payload, op)
+      await db.addOperation(op1)
+      const hash = await db.addOperation(op2)
+      const entry = await db.log.get(hash)
+
+      const bytes = await headsStorage.get('heads')
+      const heads = JSON.parse(new TextDecoder().decode(bytes))
+
+      strictEqual(heads.length, 1)
+      strictEqual(heads.at(0).hash, hash)
+      strictEqual(heads.at(0).next.length, 1)
+      strictEqual(heads.at(0).next.at(0), entry.next.at(0))

       await db.close()
+      await headsStorage.close()
+
+      await rimraf('./orbitdb')
     })

     it('uses given MemoryStorage for entryStorage', async () => {
       const entryStorage = await MemoryStorage()
-      db = await Database({ ipfs, identity: testIdentity, address: databaseId, accessController, directory: './orbitdb', entryStorage })
-      const op = { op: 'PUT', key: 1, value: 'record 1 on db 1' }
-      const hash = await db.addOperation(op)
+      const headsStorage = await MemoryStorage()
+      db = await Database({ ipfs, identity: testIdentity, address: databaseId, accessController, directory: './orbitdb', headsStorage, entryStorage })
+      const op1 = { op: 'PUT', key: 1, value: 'record 1 on db 1 version 1' }
+      const op2 = { op: 'PUT', key: 1, value: 'record 1 on db 1 version 2' }

-      deepStrictEqual((await Entry.decode(await entryStorage.get(hash))).payload, op)
+      await db.addOperation(op1)
+      const hash = await db.addOperation(op2)
+      const entry = await db.log.get(hash)
+
+      const bytes = await headsStorage.get('heads')
+      const heads = JSON.parse(new TextDecoder().decode(bytes))
+
+      strictEqual(heads.length, 1)
+      strictEqual(heads.at(0).hash, hash)
+      strictEqual(heads.at(0).next.length, 1)
+      strictEqual(heads.at(0).next.at(0), entry.next.at(0))

       await db.close()
+      await entryStorage.close()
+      await headsStorage.close()
+
+      await rimraf('./orbitdb')
     })
   })

View File

@@ -320,11 +320,12 @@ describe('KeyValueIndexed Database Replication', function () {
   })

   it('indexes deletes correctly', async () => {
+    const databaseId = 'kv-CCC'
     let replicated = false
+    let err

-    const onError = (err) => {
-      console.error(err)
-      deepStrictEqual(err, undefined)
+    const onError = (error) => {
+      err = error
     }

     kv1 = await KeyValueIndexed()({ ipfs: ipfs1, identity: testIdentity1, address: databaseId, accessController, directory: './orbitdb11' })
@@ -339,11 +340,11 @@ describe('KeyValueIndexed Database Replication', function () {
     kv2 = await KeyValueIndexed()({ ipfs: ipfs2, identity: testIdentity2, address: databaseId, accessController, directory: './orbitdb22' })

-    const onUpdate = (entry) => {
+    const onConnected = (entry) => {
       replicated = true
     }

-    kv2.events.on('update', onUpdate)
+    kv2.events.on('join', onConnected)
     kv2.events.on('error', onError)

     await waitFor(() => replicated, () => true)
@@ -358,6 +359,8 @@ describe('KeyValueIndexed Database Replication', function () {
       all2.push(keyValue)
     }

+    deepStrictEqual(err, undefined)
+
     deepStrictEqual(all2.map(e => { return { key: e.key, value: e.value } }), [
       { key: 'init', value: true },
       { key: 'hello', value: 'friend' }

View File

@@ -33,7 +33,8 @@ describe('Entry', function () {
   it('creates a an empty entry', async () => {
     const expectedHash = 'zdpuAsKzwUEa8cz9pkJxxFMxLuP3cutA9PDGoLZytrg4RSVEa'
     const entry = await create(testIdentity, 'A', 'hello')
-    strictEqual(entry.hash, expectedHash)
+    const { hash } = await Entry.encode(entry)
+    strictEqual(hash, expectedHash)
     strictEqual(entry.id, 'A')
     strictEqual(entry.clock.id, testIdentity.publicKey)
     strictEqual(entry.clock.time, 0)
@@ -47,7 +48,8 @@ describe('Entry', function () {
     const expectedHash = 'zdpuAmthfqpHRQjdSpKN5etr1GrreJb7QcU1Hshm6pERnzsxi'
     const payload = 'hello world'
     const entry = await create(testIdentity, 'A', payload)
-    strictEqual(entry.hash, expectedHash)
+    const { hash } = await Entry.encode(entry)
+    strictEqual(hash, expectedHash)
     strictEqual(entry.payload, payload)
     strictEqual(entry.id, 'A')
     strictEqual(entry.clock.id, testIdentity.publicKey)
@@ -81,7 +83,7 @@ describe('Entry', function () {
     const payload2 = 'hello again'
     const entry1 = await create(testIdentity, 'A', payload1)
     entry1.clock = tickClock(entry1.clock)
-    const entry2 = await create(testIdentity, 'A', payload2, entry1.clock, [entry1])
+    const entry2 = await create(testIdentity, 'A', payload2, null, entry1.clock, [entry1])
     strictEqual(entry2.payload, payload2)
     strictEqual(entry2.next.length, 1)
     // strictEqual(entry2.hash, expectedHash)
@@ -91,7 +93,8 @@ describe('Entry', function () {
   it('`next` parameter can be an array of strings', async () => {
     const entry1 = await create(testIdentity, 'A', 'hello1')
-    const entry2 = await create(testIdentity, 'A', 'hello2', null, [entry1.hash])
+    const { hash } = await Entry.encode(entry1)
+    const entry2 = await create(testIdentity, 'A', 'hello2', null, null, [hash])
     strictEqual(typeof entry2.next[0] === 'string', true)
   })
@@ -138,7 +141,7 @@ describe('Entry', function () {
   it('throws an error if next is not an array', async () => {
     let err
     try {
-      await create(testIdentity, 'A', 'hello', null, {})
+      await create(testIdentity, 'A', 'hello', null, null, {})
     } catch (e) {
       err = e
     }
View File
@@ -1,7 +1,7 @@
import { strictEqual, notStrictEqual, deepStrictEqual } from 'assert'
import { rimraf } from 'rimraf'
import { copy } from 'fs-extra'
-import { Log, Entry, Identities, KeyStore } from '../../src/index.js'
+import { Log, Identities, KeyStore } from '../../src/index.js'
import { Clock } from '../../src/oplog/log.js'
import { MemoryStorage } from '../../src/storage/index.js'
import testKeysPath from '../fixtures/test-keys-path.js'
@@ -760,7 +760,7 @@ describe('Log - Join', async function () {
      await log1.storage.merge(log0.storage)
-     await headsStorage1.put(e0.hash, e0.bytes)
+     await headsStorage1.put('heads', new TextEncoder().encode(JSON.stringify([{ hash: e0.hash, next: e0.next }])))
      await log1.append('hello1')
      await log1.append('hello2')
@@ -863,7 +863,7 @@
    })
    describe('throws an error if verification of an entry in given entry\'s history fails', async () => {
-     let e1, e3
+     let e1
      let headsStorage1, headsStorage2
      before(async () => {
@@ -875,23 +875,19 @@
        e1 = await log1.append('hello1')
        await log1.append('hello2')
-       e3 = await log1.append('hello3')
+       await log1.append('hello3')
      })
      it('throws an error if an entry doesn\'t have a payload field', async () => {
        const e = Object.assign({}, e1)
        delete e.payload
-       delete e.bytes
-       delete e.hash
-       const ee = await Entry.encode(e)
-       await headsStorage1.put(e1.hash, ee.bytes)
+       await headsStorage1.put('heads', JSON.stringify([{ hash: e1.hash, next: e1.next }]))
        await log2.storage.merge(headsStorage1)
        let err
        try {
-         await log2.joinEntry(e3)
+         await log2.joinEntry(e)
        } catch (e) {
          err = e
        }
@@ -906,16 +902,12 @@
        const e = Object.assign({}, e1)
        delete e.key
-       delete e.bytes
-       delete e.hash
-       const ee = await Entry.encode(e)
-       await headsStorage1.put(e1.hash, ee.bytes)
+       await headsStorage1.put('heads', JSON.stringify([{ hash: e1.hash, next: e1.next }]))
        await log2.storage.merge(headsStorage1)
        let err
        try {
-         await log2.joinEntry(e3)
+         await log2.joinEntry(e)
        } catch (e) {
          err = e
        }
@@ -930,16 +922,12 @@
        const e = Object.assign({}, e1)
        delete e.sig
-       delete e.bytes
-       delete e.hash
-       const ee = await Entry.encode(e)
-       await headsStorage1.put(e1.hash, ee.bytes)
+       await headsStorage1.put('heads', JSON.stringify([{ hash: e1.hash, next: e1.next }]))
        await log2.storage.merge(headsStorage1)
        let err
        try {
-         await log2.joinEntry(e3)
+         await log2.joinEntry(e)
        } catch (e) {
          err = e
        }
@@ -953,22 +941,19 @@
      it('throws an error if an entry signature doesn\'t verify', async () => {
        const e = Object.assign({}, e1)
        e.sig = '1234567890'
-       delete e.bytes
-       delete e.hash
-       const ee = await Entry.encode(e)
-       await headsStorage1.put(e1.hash, ee.bytes)
+       await headsStorage1.put('heads', JSON.stringify([{ hash: e1.hash, next: e1.next }]))
        await log2.storage.merge(headsStorage1)
        let err
        try {
-         await log2.joinEntry(e3)
+         await log2.joinEntry(e)
        } catch (e) {
          err = e
        }
        notStrictEqual(err, undefined)
-       strictEqual(err.message, 'Could not validate signature for entry "zdpuAvkAJ8C46cnGdtFpcBratA5MqK7CcjqCJjjmuKuFvZir3"')
+       strictEqual(err.message, 'Could not validate signature for entry "zdpuAxyE4ScWLf4X6VvkhMrpDQvwdvQno1DhzY5p1U3GPHrBT"')
        deepStrictEqual(await log2.all(), [])
        deepStrictEqual(await log2.heads(), [])
      })
View File
@@ -60,15 +60,21 @@ describe('Log', function () {
    })
    it('sets one head if multiple are given as params', async () => {
-     const one = await create(testIdentity, 'A', 'entryA', null, [])
-     const two = await create(testIdentity, 'A', 'entryB', null, [one.hash])
-     const three = await create(testIdentity, 'A', 'entryC', null, [two.hash])
-     const four = await create(testIdentity, 'A', 'entryD', null, [two.hash])
+     const one = await create(testIdentity, 'A', 'entryA', null, null, [])
+     const { hash: hash1, bytes: bytes1 } = await Entry.encode(one)
+     const two = await create(testIdentity, 'A', 'entryB', null, null, [hash1])
+     const { hash: hash2, bytes: bytes2 } = await Entry.encode(two)
+     const three = await create(testIdentity, 'A', 'entryC', null, null, [hash2])
+     const { hash: hash3, bytes: bytes3 } = await Entry.encode(three)
+     const four = await create(testIdentity, 'A', 'entryD', null, null, [hash3])
+     const { hash: hash4, bytes: bytes4 } = await Entry.encode(four)
      const entryStorage = await MemoryStorage()
-     await entryStorage.put(one.hash, one.bytes)
-     await entryStorage.put(two.hash, two.bytes)
-     await entryStorage.put(three.hash, three.bytes)
-     await entryStorage.put(four.hash, four.bytes)
+     await entryStorage.put(hash1, bytes1)
+     await entryStorage.put(hash2, bytes2)
+     await entryStorage.put(hash3, bytes3)
+     await entryStorage.put(hash4, bytes4)
+     three.hash = hash3
+     two.hash = hash2
      const log = await Log(testIdentity, { logId: 'A', logHeads: [three, three, two, two], entryStorage })
      const values = await log.values()
      const heads = await log.heads()
@@ -78,15 +84,22 @@
    })
    it('sets two heads if two given as params', async () => {
-     const one = await create(testIdentity, 'A', 'entryA', null, [])
-     const two = await create(testIdentity, 'A', 'entryB', null, [one.hash])
-     const three = await create(testIdentity, 'A', 'entryC', null, [two.hash])
-     const four = await create(testIdentity, 'A', 'entryD', null, [two.hash])
+     const one = await create(testIdentity, 'A', 'entryA', null, null, [])
+     const { hash: hash1, bytes: bytes1 } = await Entry.encode(one)
+     const two = await create(testIdentity, 'A', 'entryB', null, null, [hash1])
+     const { hash: hash2, bytes: bytes2 } = await Entry.encode(two)
+     const three = await create(testIdentity, 'A', 'entryC', null, null, [hash2])
+     const { hash: hash3, bytes: bytes3 } = await Entry.encode(three)
+     const four = await create(testIdentity, 'A', 'entryD', null, null, [hash2])
+     const { hash: hash4, bytes: bytes4 } = await Entry.encode(four)
      const entryStorage = await MemoryStorage()
-     await entryStorage.put(one.hash, one.bytes)
-     await entryStorage.put(two.hash, two.bytes)
-     await entryStorage.put(three.hash, three.bytes)
-     await entryStorage.put(four.hash, four.bytes)
+     await entryStorage.put(hash1, bytes1)
+     await entryStorage.put(hash2, bytes2)
+     await entryStorage.put(hash3, bytes3)
+     await entryStorage.put(hash4, bytes4)
+     three.hash = hash3
+     four.hash = hash4
+     two.hash = hash2
      const log = await Log(testIdentity, { logId: 'A', logHeads: [three, four, two], entryStorage })
      const values = await log.values()
      const heads = await log.heads()
View File
@@ -69,7 +69,7 @@ describe('Log - Replication', function () {
      try {
        if (!messageIsFromMe(message)) {
          const entry = await Entry.decode(message.detail.data)
-         await storage1.put(entry.hash, entry.bytes)
+         await storage1.put(entry.hash, message.detail.data)
          await log1.joinEntry(entry)
        }
      } catch (e) {
@@ -83,7 +83,7 @@
      try {
        if (!messageIsFromMe(message)) {
          const entry = await Entry.decode(message.detail.data)
-         await storage2.put(entry.hash, entry.bytes)
+         await storage2.put(entry.hash, message.detail.data)
          await log2.joinEntry(entry)
        }
      } catch (e) {
@@ -114,8 +114,10 @@
      for (let i = 1; i <= amount; i++) {
        const entry1 = await input1.append('A' + i)
        const entry2 = await input2.append('B' + i)
-       await ipfs1.libp2p.services.pubsub.publish(logId, entry1.bytes)
-       await ipfs2.libp2p.services.pubsub.publish(logId, entry2.bytes)
+       const bytes1 = await input1.storage.get(entry1.hash)
+       const bytes2 = await input1.storage.get(entry2.hash)
+       await ipfs1.libp2p.services.pubsub.publish(logId, bytes1)
+       await ipfs2.libp2p.services.pubsub.publish(logId, bytes2)
      }
      console.log('Messages sent')
View File
@@ -0,0 +1,376 @@
import { strictEqual, notEqual } from 'assert'
import { rimraf } from 'rimraf'
import path from 'path'
import { createOrbitDB } from '../src/index.js'
import connectPeers from './utils/connect-nodes.js'
import waitFor from './utils/wait-for.js'
import createHelia from './utils/create-helia.js'
import * as Block from 'multiformats/block'
import * as dagCbor from '@ipld/dag-cbor'
import { sha256 } from 'multiformats/hashes/sha2'
import SimpleEncryption from '@orbitdb/simple-encryption'
const codec = dagCbor
const hasher = sha256
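// dag-cbor codec and sha256 hasher are used below to decode raw entry blocks read from storage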
const dbPath = './orbitdb/tests/write-permissions'
describe('Encryption', function () {
this.timeout(5000)
let ipfs1, ipfs2
let orbitdb1, orbitdb2
let db1, db2
let replicationEncryption
let dataEncryption
before(async () => {
[ipfs1, ipfs2] = await Promise.all([createHelia(), createHelia()])
await connectPeers(ipfs1, ipfs2)
await rimraf('./orbitdb')
orbitdb1 = await createOrbitDB({ ipfs: ipfs1, id: 'user1', directory: path.join(dbPath, '1') })
orbitdb2 = await createOrbitDB({ ipfs: ipfs2, id: 'user2', directory: path.join(dbPath, '2') })
replicationEncryption = await SimpleEncryption({ password: 'hello' })
dataEncryption = await SimpleEncryption({ password: 'world' })
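// Two independent passwords: one module encrypts whole log entries, the other encrypts only payloads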
})
after(async () => {
if (orbitdb1) {
await orbitdb1.stop()
}
if (orbitdb2) {
await orbitdb2.stop()
}
if (ipfs1) {
await ipfs1.stop()
}
if (ipfs2) {
await ipfs2.stop()
}
await rimraf('./orbitdb')
await rimraf('./ipfs1')
await rimraf('./ipfs2')
})
describe('Data is encrypted when replicated to peers', async () => {
afterEach(async () => {
if (db1) {
await db1.drop()
await db1.close()
}
if (db2) {
await db2.drop()
await db2.close()
}
})
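// Each test opens the same database on both peers, writes on db1, and waits for db2 to replicate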
it('encrypts/decrypts data', async () => {
let connected = false
let updated = false
let error = false
const encryption = {
data: dataEncryption
}
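// Payload-only encryption: entry metadata stays readable, values are encrypted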
db1 = await orbitdb1.open('encryption-test-1', { encryption })
db2 = await orbitdb2.open(db1.address, { encryption })
const onJoin = async (peerId, heads) => {
connected = true
}
db2.events.on('join', onJoin)
await waitFor(() => connected, () => true)
const onUpdate = async (peerId, heads) => {
updated = true
}
db2.events.on('update', onUpdate)
const onError = async (err) => {
// Catch "Could not decrypt entry" errors
console.log(err)
error = true
}
db2.events.on('error', onError)
const hash1 = await db1.add('record 1')
const hash2 = await db1.add('record 2')
strictEqual(await db1.get(hash1), 'record 1')
strictEqual(await db1.get(hash2), 'record 2')
await waitFor(() => updated || error, () => true)
const all = await db2.all()
strictEqual(all.length, 2)
strictEqual(all[0].value, 'record 1')
strictEqual(all[1].value, 'record 2')
})
it('encrypts/decrypts log', async () => {
let connected = false
let updated = false
let error = false
const encryption = {
replication: replicationEncryption
}
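// Full log entry encryption via the replication option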
db1 = await orbitdb1.open('encryption-test-1', { encryption })
db2 = await orbitdb2.open(db1.address, { encryption })
const onJoin = async (peerId, heads) => {
connected = true
}
db2.events.on('join', onJoin)
await waitFor(() => connected, () => true)
const onUpdate = async (peerId, heads) => {
updated = true
}
db2.events.on('update', onUpdate)
const onError = async (err) => {
// Catch "Could not decrypt entry" errors
console.log(err)
error = true
}
db2.events.on('error', onError)
const hash1 = await db1.add('record 1')
const hash2 = await db1.add('record 2')
strictEqual(await db1.get(hash1), 'record 1')
strictEqual(await db1.get(hash2), 'record 2')
await waitFor(() => updated || error, () => true)
const all = await db2.all()
strictEqual(all.length, 2)
strictEqual(all[0].value, 'record 1')
strictEqual(all[1].value, 'record 2')
})
it('encrypts/decrypts log and data', async () => {
let connected = false
let updated = false
let error = false
const encryption = {
replication: replicationEncryption,
data: dataEncryption
}
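// Both layers: log entries are encrypted, and the payloads inside them are encrypted separately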
db1 = await orbitdb1.open('encryption-test-1', { encryption })
db2 = await orbitdb2.open(db1.address, { encryption })
const onJoin = async (peerId, heads) => {
connected = true
}
db2.events.on('join', onJoin)
await waitFor(() => connected, () => true)
const onUpdate = async (peerId, heads) => {
updated = true
}
db2.events.on('update', onUpdate)
const onError = async (err) => {
// Catch "Could not decrypt entry" errors
console.log(err)
error = true
}
db2.events.on('error', onError)
const hash1 = await db1.add('record 1')
const hash2 = await db1.add('record 2')
strictEqual(await db1.get(hash1), 'record 1')
strictEqual(await db1.get(hash2), 'record 2')
await waitFor(() => updated || error, () => true)
const all = await db2.all()
strictEqual(all.length, 2)
strictEqual(all[0].value, 'record 1')
strictEqual(all[1].value, 'record 2')
})
it('throws an error if log can\'t be decrypted', async () => {
let connected = false
let hasError = false
let error
const replicationEncryptionWithFailure = await SimpleEncryption({ password: 'goodbye' })
const encryption = {
replication: replicationEncryption
}
const encryptionWithFailure = {
replication: replicationEncryptionWithFailure
}
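// db2 uses the wrong password, so decrypting replicated entries should fail and emit 'error'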
db1 = await orbitdb1.open('encryption-test-1', { encryption })
db2 = await orbitdb2.open(db1.address, { encryption: encryptionWithFailure })
const onJoin = async (peerId, heads) => {
connected = true
}
db2.events.on('join', onJoin)
await waitFor(() => connected, () => true)
const onError = async (err) => {
// Catch "Could not decrypt entry" errors
error = err
hasError = true
}
db2.events.on('error', onError)
await db1.add('record 1')
await waitFor(() => hasError, () => true)
strictEqual(error.message, 'Could not decrypt entry')
const all = await db2.all()
strictEqual(all.length, 0)
})
it('throws an error if data can\'t be decrypted', async () => {
let connected = false
let hasError = false
let error
const dataEncryptionWithFailure = await SimpleEncryption({ password: 'goodbye' })
const encryption = {
data: dataEncryption
}
const encryptionWithFailure = {
data: dataEncryptionWithFailure
}
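// Wrong data password: entries replicate, but payload decryption should fail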
db1 = await orbitdb1.open('encryption-test-1', { encryption })
db2 = await orbitdb2.open(db1.address, { encryption: encryptionWithFailure })
const onJoin = async (peerId, heads) => {
connected = true
}
db2.events.on('join', onJoin)
await waitFor(() => connected, () => true)
const onError = async (err) => {
// Catch "Could not decrypt entry" errors
error = err
hasError = true
}
db2.events.on('error', onError)
await db1.add('record 1')
await waitFor(() => hasError, () => true)
strictEqual(error.message, 'Could not decrypt payload')
const all = await db2.all()
strictEqual(all.length, 0)
})
})
describe('Data is encrypted in storage', async () => {
afterEach(async () => {
if (db1) {
await db1.drop()
await db1.close()
}
})
it('payload bytes are encrypted in storage', async () => {
let error
const encryption = {
data: dataEncryption
}
db1 = await orbitdb1.open('encryption-test-1', { encryption })
const onError = async (err) => {
// Catch "Could not decrypt entry" errors
console.log(err)
error = true
}
db1.events.on('error', onError)
const hash1 = await db1.add('record 1')
const bytes = await db1.log.storage.get(hash1)
const { value } = await Block.decode({ bytes, codec, hasher })
const payload = value.payload
strictEqual(payload.constructor, Uint8Array)
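// The encrypted payload must not decode as dag-cbor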
try {
await Block.decode({ bytes: payload, codec, hasher })
} catch (e) {
error = e
}
strictEqual(error.message.startsWith('CBOR decode error'), true)
})
it('entry bytes are encrypted in storage', async () => {
let error
const encryption = {
replication: replicationEncryption
}
db1 = await orbitdb1.open('encryption-test-1', { encryption })
const onError = async (err) => {
// Catch "Could not decrypt entry" errors
console.log(err)
error = true
}
db1.events.on('error', onError)
const hash1 = await db1.add('record 1')
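// With replication encryption, the stored block decodes to encrypted bytes, not an entry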
let decodedBytes
try {
const bytes = await db1.log.storage.get(hash1)
decodedBytes = await Block.decode({ bytes, codec, hasher })
await Block.decode({ bytes: decodedBytes, codec, hasher })
} catch (e) {
error = e
}
notEqual(error, undefined)
strictEqual(error.message.startsWith('CBOR decode error'), true)
strictEqual(decodedBytes.value.constructor, Uint8Array)
})
})
})
View File
@@ -139,8 +139,7 @@ describe('Sync protocol', function () {
      log1 = await Log(testIdentity1, { logId: 'synclog111', entryStorage: entryStorage1 })
      log2 = await Log(testIdentity2, { logId: 'synclog111', entryStorage: entryStorage2 })
-     const onSynced = async (bytes) => {
-       const entry = await Entry.decode(bytes)
+     const onSynced = async (entry) => {
        if (await log2.joinEntry(entry)) {
          syncedHead = entry
          syncedEventFired = true
@@ -207,8 +206,7 @@
      log1 = await Log(testIdentity1, { logId: 'synclog7', entryStorage: entryStorage1 })
      log2 = await Log(testIdentity2, { logId: 'synclog7', entryStorage: entryStorage2 })
-     const onSynced = async (bytes) => {
-       const entry = await Entry.decode(bytes)
+     const onSynced = async (entry) => {
        if (await log2.joinEntry(entry)) {
          syncedHead = entry
        }
@@ -291,8 +289,8 @@
      log1 = await Log(testIdentity1, { logId: 'synclog1' })
      log2 = await Log(testIdentity2, { logId: 'synclog1' })
-     const onSynced = async (bytes) => {
-       syncedHead = await Entry.decode(bytes)
+     const onSynced = async (entry) => {
+       syncedHead = entry
        syncedEventFired = expectedEntry.hash === syncedHead.hash
      }
@@ -348,8 +346,8 @@
      log1 = await Log(testIdentity1, { logId: 'synclog1' })
      log2 = await Log(testIdentity2, { logId: 'synclog1' })
-     const onSynced = async (bytes) => {
-       syncedHead = await Entry.decode(bytes)
+     const onSynced = async (entry) => {
+       syncedHead = entry
        if (expectedEntry) {
          syncedEventFired = expectedEntry.hash === syncedHead.hash
        }
@@ -434,9 +432,9 @@
      log1 = await Log(testIdentity1, { logId: 'synclog1' })
      log2 = await Log(testIdentity2, { logId: 'synclog1' })
-     const onSynced = async (bytes) => {
+     const onSynced = async (entry) => {
        if (expectedEntry && !syncedEventFired) {
-         syncedHead = await Entry.decode(bytes)
+         syncedHead = entry
          syncedEventFired = expectedEntry.hash === syncedHead.hash
        }
      }
@@ -518,8 +516,8 @@
      log1 = await Log(testIdentity1, { logId: 'synclog2' })
      log2 = await Log(testIdentity2, { logId: 'synclog2' })
-     const onSynced = async (bytes) => {
-       syncedHead = await Entry.decode(bytes)
+     const onSynced = async (entry) => {
+       syncedHead = entry
        if (expectedEntry) {
          syncedEventFired = expectedEntry ? expectedEntry.hash === syncedHead.hash : false
        }
@@ -665,7 +663,7 @@
      let sync1, sync2
      let log1, log2
-     const timeoutTime = 1 // 1 millisecond
+     const timeoutTime = 0 // 0 milliseconds
      before(async () => {
        [ipfs1, ipfs2] = await Promise.all([createHelia(), createHelia()])
@@ -701,13 +699,13 @@
      let err = null
      const onError = (error) => {
-       (!err) && (err = error)
+       err ??= error
      }
      sync1 = await Sync({ ipfs: ipfs1, log: log1, timeout: timeoutTime })
+     sync2 = await Sync({ ipfs: ipfs2, log: log2, start: false, timeout: timeoutTime })
      sync1.events.on('error', onError)
-     sync2 = await Sync({ ipfs: ipfs2, log: log2, start: false, timeout: timeoutTime })
      sync2.events.on('error', onError)
      await log1.append('hello1')
@@ -718,7 +716,7 @@
      notStrictEqual(err, null)
      strictEqual(err.type, 'aborted')
-     strictEqual(err.message, 'Read aborted')
+     strictEqual(err.message.includes('aborted'), true)
    })
  })
})