test: Connecting to multiple orbitdb instances and multiple databases… (#45)

* test: Connecting to multiple orbitdb instances and multiple databases. Replicating data across multiple dbs.

* test: Higher-level orbitdb test. Named accordingly.

* test: Test multiple db stores.

* test: Delete test dirs.

* feat: buffer all records from iterator.

* fix: Linting.

* test: Re-enable all tests.

* test: Wait for replication to complete on all dbs.

* test: Rename storage test file.

* test: Remove unnecessary tests.

* test: Close dbs.

* test: Count all the connecting peers.
Hayden Young 2023-03-25 02:35:19 +08:00 committed by GitHub
parent 2a563bac79
commit 3ff24c298f
7 changed files with 193 additions and 482 deletions

@@ -80,6 +80,14 @@ const DocumentStore = async ({ OpLog, Database, ipfs, identity, address, name, a
}
}
const all = async () => {
const values = []
for await (const entry of iterator()) {
values.unshift(entry)
}
return values
}
return {
...database,
type: 'documentstore',
@@ -88,7 +96,8 @@ const DocumentStore = async ({ OpLog, Database, ipfs, identity, address, name, a
get,
iterator,
query,
indexBy
indexBy,
all
}
}
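The new all() helper buffers every record yielded by the store's async iterator into a single array (entries are unshifted, so the last entry the iterator yields ends up first in the array). A minimal usage sketch; the database name 'users' and the record fields are illustrative, not from this commit:

const db = await orbitdb1.open('users', { ...options, type: 'documents' })
await db.put({ _id: 'doc1', msg: 'hi' })
// One call instead of looping over iterator() manually
const records = await db.all()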

@@ -41,6 +41,14 @@ const KeyValue = async ({ OpLog, Database, ipfs, identity, address, name, access
}
}
const all = async () => {
const values = []
for await (const entry of iterator()) {
values.unshift(entry)
}
return values
}
return {
...database,
type: 'keyvalue',
@@ -48,7 +56,8 @@ const KeyValue = async ({ OpLog, Database, ipfs, identity, address, name, access
set: put, // Alias for put()
del,
get,
iterator
iterator,
all
}
}
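The keyvalue store gains the same all() helper. A short sketch of how it might be used, assuming an open keyvalue store (the store name 'settings' is hypothetical):

const kv = await orbitdb1.open('settings', { ...options, type: 'keyvalue' })
await kv.put('hello', 1)
// Drains iterator() into one buffered array of entries
const entries = await kv.all()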

@@ -1,176 +0,0 @@
// import assert from 'assert'
// import mapSeries from 'p-each-series'
// import rmrf from 'rimraf'
// import OrbitDB from '../src/OrbitDB.js'
// // Include test utilities
// import {
// config,
// startIpfs,
// stopIpfs,
// connectPeers,
// waitForPeers,
// testAPIs,
// } from 'orbit-db-test-utils'
// const dbPath1 = './orbitdb/tests/multiple-databases/1'
// const dbPath2 = './orbitdb/tests/multiple-databases/2'
// const databaseInterfaces = [
// {
// name: 'logdb',
// open: async (orbitdb, address, options) => await orbitdb.log(address, options),
// write: async (db, index) => await db.add('hello' + index),
// query: (db) => db.iterator({ limit: -1 }).collect().length,
// },
// {
// name: 'feed',
// open: async (orbitdb, address, options) => await orbitdb.feed(address, options),
// write: async (db, index) => await db.add('hello' + index),
// query: (db) => db.iterator({ limit: -1 }).collect().length,
// },
// {
// name: 'key-value',
// open: async (orbitdb, address, options) => await orbitdb.keyvalue(address, options),
// write: async (db, index) => await db.put('hello', index),
// query: (db) => db.get('hello'),
// },
// {
// name: 'counterdb',
// open: async (orbitdb, address, options) => await orbitdb.counter(address, options),
// write: async (db, index) => await db.inc(1),
// query: (db) => db.value,
// },
// {
// name: 'documents',
// open: async (orbitdb, address, options) => await orbitdb.docs(address, options),
// write: async (db, index) => await db.put({ _id: 'hello', testing: index }),
// query: (db) => {
// const docs = db.get('hello')
// return docs ? docs[0].testing : 0
// },
// },
// ]
// Object.keys(testAPIs).forEach(API => {
// describe(`orbit-db - Multiple Databases (${API})`, function() {
// this.timeout(config.timeout)
// let ipfsd1, ipfsd2, ipfs1, ipfs2
// let orbitdb1, orbitdb2, db1, db2, db3, db4
// let localDatabases = []
// let remoteDatabases = []
// // Create two IPFS instances and two OrbitDB instances (2 nodes/peers)
// before(async () => {
// rmrf.sync(dbPath1)
// rmrf.sync(dbPath2)
// ipfsd1 = await startIpfs(API, config.daemon1)
// ipfsd2 = await startIpfs(API, config.daemon2)
// ipfs1 = ipfsd1.api
// ipfs2 = ipfsd2.api
// // Connect the peers manually to speed up test times
// const isLocalhostAddress = (addr) => addr.toString().includes('127.0.0.1')
// await connectPeers(ipfs1, ipfs2, { filter: isLocalhostAddress })
// console.log("Peers connected")
// orbitdb1 = await OrbitDB.createInstance(ipfs1, { directory: dbPath1 })
// orbitdb2 = await OrbitDB.createInstance(ipfs2, { directory: dbPath2 })
// })
// after(async () => {
// if(orbitdb1)
// await orbitdb1.stop()
// if(orbitdb2)
// await orbitdb2.stop()
// if (ipfsd1)
// await stopIpfs(ipfsd1)
// if (ipfsd2)
// await stopIpfs(ipfsd2)
// })
// beforeEach(async () => {
// let options = {}
// // Set write access for both clients
// options.write = [
// orbitdb1.identity.publicKey,
// orbitdb2.identity.publicKey
// ],
// console.log("Creating databases and waiting for peers to connect")
// // Open the databases on the first node
// options = Object.assign({}, options, { create: true })
// // Open the databases on the first node
// for (let dbInterface of databaseInterfaces) {
// const db = await dbInterface.open(orbitdb1, dbInterface.name, options)
// localDatabases.push(db)
// }
// for (let [index, dbInterface] of databaseInterfaces.entries()) {
// const address = localDatabases[index].address.toString()
// const db = await dbInterface.open(orbitdb2, address, options)
// remoteDatabases.push(db)
// }
// // Wait for the peers to connect
// await waitForPeers(ipfs1, [orbitdb2.id], localDatabases[0].address.toString())
// await waitForPeers(ipfs2, [orbitdb1.id], localDatabases[0].address.toString())
// console.log("Peers connected")
// })
// afterEach(async () => {
// for (let db of remoteDatabases)
// await db.drop()
// for (let db of localDatabases)
// await db.drop()
// })
// it('replicates multiple open databases', async () => {
// const entryCount = 32
// const entryArr = []
// // Create an array that we use to create the db entries
// for (let i = 1; i < entryCount + 1; i ++)
// entryArr.push(i)
// // Write entries to each database
// console.log("Writing to databases")
// for (let index = 0; index < databaseInterfaces.length; index++) {
// const dbInterface = databaseInterfaces[index]
// const db = localDatabases[index]
// await mapSeries(entryArr, val => dbInterface.write(db, val))
// }
// // Function to check if all databases have been replicated
// const allReplicated = () => {
// return remoteDatabases.every(db => db._oplog.length === entryCount)
// }
// console.log("Waiting for replication to finish")
// return new Promise((resolve, reject) => {
// const interval = setInterval(() => {
// if (allReplicated()) {
// clearInterval(interval)
// // Verify that the databases contain all the right entries
// databaseInterfaces.forEach((dbInterface, index) => {
// const db = remoteDatabases[index]
// const result = dbInterface.query(db)
// assert.equal(result, entryCount)
// assert.equal(db._oplog.length, entryCount)
// })
// resolve()
// }
// }, 200)
// })
// })
// })
// })

@@ -0,0 +1,173 @@
import { strictEqual } from 'assert'
// import mapSeries from 'p-each-series'
import * as IPFS from 'ipfs'
import rmrf from 'rimraf'
import OrbitDB from '../src/OrbitDB.js'
import config from './config.js'
import connectPeers from './utils/connect-nodes.js'
import waitFor from './utils/wait-for.js'
const dbPath1 = './orbitdb/tests/multiple-databases/1'
const dbPath2 = './orbitdb/tests/multiple-databases/2'
const databaseInterfaces = [
{
name: 'event-store',
open: async (orbitdb, address, options) => await orbitdb.open(address, options),
write: async (db, index) => {
await db.add('hello' + index)
},
query: async (db) => {
const all = await db.all()
return all.length
}
},
{
name: 'key-value',
open: async (orbitdb, address, options) => await orbitdb.open(address, { ...options, type: 'keyvalue' }),
write: async (db, index) => await db.put('hello', index),
query: async (db) => await db.get('hello')
},
{
name: 'documents',
open: async (orbitdb, address, options) => await orbitdb.open(address, { ...options, type: 'documents' }),
write: async (db, index) => await db.put({ _id: 'hello', testing: index }),
query: async (db) => {
const doc = await db.get('hello')
return doc ? doc.value.testing : 0
}
}
]
describe('orbit-db - Multiple Databases', function () {
this.timeout(30000)
let ipfs1, ipfs2
let orbitdb1, orbitdb2
const localDatabases = []
const remoteDatabases = []
// Create two IPFS instances and two OrbitDB instances (2 nodes/peers)
before(async () => {
ipfs1 = await IPFS.create({ ...config.daemon1, repo: './ipfs1' })
ipfs2 = await IPFS.create({ ...config.daemon2, repo: './ipfs2' })
await connectPeers(ipfs1, ipfs2)
console.log('Peers connected')
orbitdb1 = await OrbitDB({ ipfs: ipfs1, id: 'user1', directory: dbPath1 })
orbitdb2 = await OrbitDB({ ipfs: ipfs2, id: 'user2', directory: dbPath2 })
})
after(async () => {
if (orbitdb1) {
await orbitdb1.stop()
}
if (orbitdb2) {
await orbitdb2.stop()
}
await rmrf('./orbitdb')
if (ipfs1) {
await ipfs1.stop()
}
if (ipfs2) {
await ipfs2.stop()
}
await rmrf('./ipfs1')
await rmrf('./ipfs2')
})
beforeEach(async () => {
let options = {}
// Set write access for both clients
options.write = [
orbitdb1.identity.publicKey,
orbitdb2.identity.publicKey
]
let connected1Count = 0
let connected2Count = 0
const onConnected1 = async (peerId, heads) => {
++connected1Count
}
const onConnected2 = async (peerId, heads) => {
++connected2Count
}
console.log('Creating databases and waiting for peers to connect')
// Open the databases on the first node
options = Object.assign({}, options, { create: true })
// Open the databases on the first node
for (const dbInterface of databaseInterfaces) {
const db = await dbInterface.open(orbitdb1, dbInterface.name, options)
db.events.on('join', onConnected1)
localDatabases.push(db)
}
for (const [index, dbInterface] of databaseInterfaces.entries()) {
const address = localDatabases[index].address.toString()
const db = await dbInterface.open(orbitdb2, address, options)
db.events.on('join', onConnected2)
remoteDatabases.push(db)
}
// Wait for the peers to connect
await waitFor(() => connected1Count === 3, () => true)
await waitFor(() => connected2Count === 3, () => true)
console.log('Peers connected')
})
afterEach(async () => {
for (const db of remoteDatabases) {
await db.drop()
await db.close()
}
for (const db of localDatabases) {
await db.drop()
await db.close()
}
})
it('replicates multiple open databases', async () => {
const entryCount = 32
// Write entries to each database
console.log('Writing to databases')
for (let index = 0; index < databaseInterfaces.length; index++) {
const dbInterface = databaseInterfaces[index]
const db = localDatabases[index]
// Create an array that we use to create the db entries
for (let i = 1; i < entryCount + 1; i++) {
await dbInterface.write(db, i)
}
}
const isReplicated = async (db) => {
const all = await db.log.all()
return all.length === entryCount
}
// Function to check if all databases have been replicated
const allReplicated = () => remoteDatabases.every(isReplicated)
console.log('Waiting for replication to finish')
await waitFor(() => allReplicated(), () => true)
for (let i = 0; i < databaseInterfaces.length; i++) {
const db = remoteDatabases[i]
const result = await databaseInterfaces[i].query(db)
strictEqual(result, entryCount)
strictEqual((await db.log.all()).length, entryCount)
}
})
})
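The test above leans on a waitFor helper imported from ./utils/wait-for.js, which is not shown in this diff. A minimal polling sketch consistent with how the test calls it (resolve once the first function's value equals the second's); this is an assumed implementation, not necessarily the project's actual utility:

// Hypothetical ./utils/wait-for.js: poll valueA() until it matches toBeValueB()
const waitFor = async (valueA, toBeValueB, pollInterval = 100) => {
  return new Promise((resolve) => {
    const interval = setInterval(async () => {
      if (await valueA() === await toBeValueB()) {
        clearInterval(interval)
        resolve()
      }
    }, pollInterval)
  })
}

export default waitFor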

@@ -1,101 +0,0 @@
// import fs from 'fs'
// import assert from 'assert'
// import rmrf from 'rimraf'
// import OrbitDB from '../src/OrbitDB.js'
// import Identities from 'orbit-db-identity-provider'
// import Keystore from 'orbit-db-keystore'
// import storageAdapter from 'orbit-db-storage-adapter'
// // Include test utilities
// import {
// config,
// startIpfs,
// stopIpfs,
// testAPIs,
// } from 'orbit-db-test-utils'
// const storage = storageAdapter()
// const keysPath = './orbitdb/identity/identitykeys'
// const dbPath = './orbitdb/tests/change-identity'
// Object.keys(testAPIs).forEach(API => {
// describe(`orbit-db - Set identities (${API})`, function() {
// this.timeout(config.timeout)
// let ipfsd, ipfs, orbitdb, keystore, options
// let identity1, identity2
// before(async () => {
// rmrf.sync(dbPath)
// ipfsd = await startIpfs(API, config.daemon1)
// ipfs = ipfsd.api
// if(fs && fs.mkdirSync) fs.mkdirSync(keysPath, { recursive: true })
// const identityStore = await storage.createStore(keysPath)
// keystore = new Keystore(identityStore)
// identity1 = await Identities.createIdentity({ id: 'test-id1', keystore })
// identity2 = await Identities.createIdentity({ id: 'test-id2', keystore })
// orbitdb = await OrbitDB.createInstance(ipfs, { directory: dbPath })
// })
// after(async () => {
// await keystore.close()
// if(orbitdb)
// await orbitdb.stop()
// if (ipfsd)
// await stopIpfs(ipfsd)
// })
// beforeEach(async () => {
// options = {}
// options.accessController = {
// write : [
// orbitdb.identity.id,
// identity1.id
// ]
// }
// options = Object.assign({}, options, { create: true, type: 'eventlog', overwrite: true })
// })
// it('sets identity', async () => {
// const db = await orbitdb.open('abc', options)
// assert.equal(db.identity, orbitdb.identity)
// db.setIdentity(identity1)
// assert.equal(db.identity, identity1)
// await db.close()
// })
// it('writes with new identity with access', async () => {
// const db = await orbitdb.open('abc', options)
// assert.equal(db.identity, orbitdb.identity)
// db.setIdentity(identity1)
// assert.equal(db.identity, identity1)
// let err
// try {
// await db.add({ hello: '1'})
// } catch (e) {
// err = e.message
// }
// assert.equal(err, null)
// await db.drop()
// })
// it('cannot write with new identity without access', async () => {
// const db = await orbitdb.open('abc', options)
// assert.equal(db.identity, orbitdb.identity)
// db.setIdentity(identity2)
// assert.equal(db.identity, identity2)
// let err
// try {
// await db.add({ hello: '1'})
// } catch (e) {
// err = e.message
// }
// assert.equal(err, `Could not append entry, key "${identity2.id}" is not allowed to write to the log`)
// await db.drop()
// })
// })
// })

@@ -1,203 +0,0 @@
// import assert from 'assert'
// import fs from 'fs-extra'
// import path from 'path'
// import rmrf from 'rimraf'
// import leveldown from 'leveldown'
// import Zip from 'adm-zip'
// import OrbitDB from '../src/OrbitDB.js'
// import Identities from 'orbit-db-identity-provider'
// import migrate from 'localstorage-level-migration'
// import Keystore from 'orbit-db-keystore'
// import storageAdapter from 'orbit-db-storage-adapter'
// // Include test utilities
// import {
// config,
// startIpfs,
// stopIpfs,
// testAPIs,
// } from 'orbit-db-test-utils'
// const storage = storageAdapter(leveldown)
// storage.preCreate = async (directory, options) => {
// fs.mkdirSync(directory, { recursive: true })
// }
// const dbPath = path.join('./orbitdb', 'tests', 'v0')
// const dbFixturesDir = path.join('./test', 'fixtures', 'v0', 'QmWDUfC4zcWJGgc9UHn1X3qQ5KZqBv4KCiCtjnpMmBT8JC', 'v0-db')
// const keyFixtures = path.join('./test', 'fixtures', 'keys','QmRfPsKJs9YqTot5krRibra4gPwoK4kghhU8iKWxBjGDDX')
// const ipfsFixturesDir = path.join('./test', 'fixtures', 'ipfs')
// Object.keys(testAPIs).forEach(API => {
// const ipfsFixtures = path.join('./test', 'fixtures', `js-ipfs.zip`)
// describe(`orbit-db - Backward-Compatibility - Open & Load (${API})`, function () {
// this.retries(1) // windows...
// this.timeout(config.timeout)
// let ipfsd, ipfs, orbitdb, db, keystore
// before(async () => {
// ipfsd = await startIpfs(API, config.daemon1)
// ipfs = ipfsd.api
// rmrf.sync(dbPath)
// const zip = new Zip(ipfsFixtures)
// await zip.extractAllToAsync(path.join('./test', 'fixtures'), true)
// const filterFunc = (src, dest) => {
// // windows has problems copying these files...
// return !(src.includes('LOG') || src.includes('LOCK'))
// }
// // copy data files to ipfs and orbitdb repos
// await fs.copy(path.join(ipfsFixturesDir, 'blocks'), path.join(ipfsd.path, 'blocks'))
// await fs.copy(path.join(ipfsFixturesDir, 'datastore'), path.join(ipfsd.path, 'datastore'), { filter: filterFunc })
// const peerId = String((await ipfs.id()).id)
// const store = await storage.createStore(path.join(dbPath, peerId, 'keys'))
// keystore = new Keystore(store)
// const identity = await Identities.createIdentity({ id: peerId, migrate: migrate(keyFixtures), keystore })
// orbitdb = await OrbitDB.createInstance(ipfs, { identity, keystore })
// })
// after(async () => {
// await keystore.close()
// if (orbitdb)
// await orbitdb.stop()
// if (ipfsd)
// await stopIpfs(ipfsd)
// rmrf.sync(ipfsFixturesDir)
// rmrf.sync('./orbitdb')
// })
// describe('Open & Load - V0 entries', function () {
// before(async () => {
// await fs.copy(dbFixturesDir, dbPath)
// db = await orbitdb.open('/orbitdb/QmWDUfC4zcWJGgc9UHn1X3qQ5KZqBv4KCiCtjnpMmBT8JC/v0-db', { directory: dbPath, accessController: { type: 'legacy-ipfs', skipManifest: true } })
// const localFixtures = await db._cache.get('_localHeads')
// const remoteFixtures = await db._cache.get('_remoteHeads')
// await db._cache.set(db.localHeadsPath, localFixtures)
// await db._cache.set(db.remoteHeadsPath, remoteFixtures)
// await db.load()
// })
// beforeEach(async () => {
// if (process.platform === 'win32') {
// // for some reason Windows does not load the database correctly at the first time.
// // this is not a good solution but... it works.
// await db.load()
// }
// })
// after(async () => {
// rmrf.sync(dbPath)
// if (db)
// await db.close()
// })
// it('open v0 orbitdb address', async () => {
// assert.notEqual(db, null)
// })
// it('database has the correct v0 address', async () => {
// assert.equal(db.address.toString().indexOf('/orbitdb'), 0)
// assert.equal(db.address.toString().indexOf('Qm'), 9)
// assert.equal(db.address.toString().indexOf('v0-db'), 56)
// })
// it('has the correct type', async () => {
// assert.equal(db.type, 'feed')
// })
// it('database has the correct access-controller', async () => {
// assert.equal(db.options.accessControllerAddress, '/ipfs/Qmc3S7aMSmH8oGmx7Zdp8UxVWcDyCq5o2H9qYFgT3GW6nM')
// assert.equal(db.access.type, 'legacy-ipfs')
// assert.strictEqual(db.access.write[0], '04b54f6ef529cd2dd2f9c6897a382c492222d42e57826269a38101ffe752aa07260ecd092a970d7eef08c4ddae2b7006ee25f07e4ab62fa5262ae3b51fdea29f78')
// })
// it('load v0 orbitdb address', async () => {
// assert.equal(db.all.length, 3)
// })
// it('allows migrated key to write', async () => {
// const hash = await db.add({ thing: 'new addition' })
// const newEntries = db.all.filter(e => e.v > 0)
// assert.equal(newEntries.length, 1)
// assert.strictEqual(newEntries[0].hash, hash)
// })
// })
// describe('Open & Load - V1 entries', function () {
// const dbPath2 = './orbitdb/tests/v1'
// const dbv1Fix = './test/fixtures/v1/QmZrWipUpBNx5VjBTESCeJBQuj4rWahZMz8CV8hBjdJAec/cache'
// const v1Address = '/orbitdb/zdpuAqpKBwd7ojM77o3rRVKA1PAEQBnWoRASY3ugJ7zqnM6z7/v1-entries'
// before(async () => {
// await fs.copy(dbv1Fix, dbPath2)
// db = await orbitdb.open(v1Address, { directory: dbPath2 })
// await db.load()
// })
// beforeEach(async () => {
// if (process.platform === 'win32') {
// // for some reason Windows does not load the database correctly at the first time.
// // this is not a good solution but... it works.
// await db.load()
// }
// })
// after(async () => {
// rmrf.sync(dbPath2)
// if (db)
// await db.close()
// })
// it('open v1 orbitdb address', async () => {
// assert.notEqual(db, null)
// })
// it('database has the correct v1 address', async () => {
// assert.equal(db.address.toString().indexOf('/orbitdb'), 0)
// assert.equal(db.address.toString().indexOf('zd'), 9)
// assert.equal(db.address.toString().indexOf('v1-entries'), 59)
// })
// it('has the correct type', async () => {
// assert.equal(db.type, 'feed')
// })
// it('database has the correct access-controller', async () => {
// assert.equal(db.access.type, 'ipfs')
// assert.equal(db.options.accessControllerAddress, '/ipfs/zdpuAsYRtJLLLDibnmxWPzyRGJEqtjmJP27ppKWcLreNGGTFN')
// assert.strictEqual(db.access.write[0], '*')
// })
// it('load v1 orbitdb address', async () => {
// assert.equal(db.all.length, 100)
// })
// it('allows adding new entry', async () => {
// const hash = await db.add('new entry')
// const newEntries = db.all.filter(e => e.v > 1)
// assert.equal(newEntries.length, 1)
// assert.strictEqual(newEntries[0].hash, hash)
// })
// it('reopens db after adding new entry', async () => {
// await db.close()
// db = await orbitdb.open(v1Address, { directory: dbPath2 })
// assert.notEqual(db, null)
// await db.load()
// assert.equal(db.all.length, 101)
// const newEntries = db.all.filter(e => e.v > 1)
// assert.equal(newEntries.length, 1)
// })
// })
// })
// })