mirror of
https://github.com/orbitdb/orbitdb.git
synced 2025-10-07 22:57:07 +00:00
Merge pull request #47 from orbitdb/dev/improve-benchmarks
Dev/improve benchmarks
This commit is contained in:
@@ -1,6 +1,7 @@
|
||||
import { Identities } from '../src/index.js'
|
||||
import { Log } from '../src/index.js'
|
||||
import { MemoryStorage, LevelStorage } from '../src/storage/index.js'
|
||||
import { Identities, Log } from '../src/index.js'
|
||||
import { MemoryStorage } from '../src/storage/index.js'
|
||||
// import { MemoryStorage, LevelStorage, LRUStorage } from '../src/storage/index.js'
|
||||
import rmrf from 'rimraf'
|
||||
|
||||
// State
|
||||
let log
|
||||
@@ -11,6 +12,9 @@ let seconds = 0
|
||||
let queriesPerSecond = 0
|
||||
let lastTenSeconds = 0
|
||||
|
||||
// Settings
|
||||
const benchmarkDuration = 20 // seconds
|
||||
|
||||
const queryLoop = async () => {
|
||||
await log.append(totalQueries.toString())
|
||||
totalQueries++
|
||||
@@ -22,6 +26,10 @@ const queryLoop = async () => {
|
||||
;(async () => {
|
||||
console.log('Starting benchmark...')
|
||||
|
||||
console.log('Benchmark duration is ' + benchmarkDuration + ' seconds')
|
||||
|
||||
await rmrf('./orbitdb')
|
||||
|
||||
const identities = await Identities()
|
||||
const testIdentity = await identities.createIdentity({ id: 'userA' })
|
||||
|
||||
@@ -29,6 +37,9 @@ const queryLoop = async () => {
|
||||
// in case we want to benchmark different storage modules
|
||||
const entryStorage = await MemoryStorage()
|
||||
const headsStorage = await MemoryStorage()
|
||||
// Test LRUStorage
|
||||
// const entryStorage = await LRUStorage()
|
||||
// const headsStorage = await LRUStorage()
|
||||
// Test LevelStorage
|
||||
// const entryStorage = await LevelStorage({ path: './logA/entries' })
|
||||
// const headsStorage = await LevelStorage({ path: './logA/heads' })
|
||||
@@ -36,13 +47,18 @@ const queryLoop = async () => {
|
||||
log = await Log(testIdentity, { logId: 'A', entryStorage, headsStorage })
|
||||
|
||||
// Output metrics at 1 second interval
|
||||
setInterval(() => {
|
||||
const interval = setInterval(async () => {
|
||||
seconds++
|
||||
if (seconds % 10 === 0) {
|
||||
console.log(`--> Average of ${lastTenSeconds / 10} q/s in the last 10 seconds`)
|
||||
if (lastTenSeconds === 0) throw new Error('Problems!')
|
||||
lastTenSeconds = 0
|
||||
}
|
||||
if (seconds >= benchmarkDuration) {
|
||||
clearInterval(interval)
|
||||
await rmrf('./orbitdb')
|
||||
process.exit(0)
|
||||
}
|
||||
console.log(`${queriesPerSecond} queries per second, ${totalQueries} queries in ${seconds} seconds`)
|
||||
queriesPerSecond = 0
|
||||
}, 1000)
|
||||
58
benchmarks/log-iterator.js
Normal file
58
benchmarks/log-iterator.js
Normal file
@@ -0,0 +1,58 @@
|
||||
import { Identities, Log } from '../src/index.js'
import { MemoryStorage } from '../src/storage/index.js'
// import { MemoryStorage, LevelStorage, LRUStorage } from '../src/storage/index.js'
import rmrf from 'rimraf'

// Compute and print throughput metrics for `count` operations that took
// `duration` milliseconds. Math.max guards against a division by zero
// (Infinity ops/s) when the run finishes within the timer's resolution.
const logMetrics = (verb, count, duration) => {
  const operationsPerSecond = Math.floor(count / (Math.max(duration, 1) / 1000))
  const millisecondsPerOp = duration / count
  console.log(`${verb} ${count} entries took ${duration} ms, ${operationsPerSecond} ops/s, ${millisecondsPerOp} ms/op`)
}

;(async () => {
  console.log('Starting benchmark...')

  // Remove state left over from a previous run
  await rmrf('./orbitdb')

  const identities = await Identities()
  const testIdentity = await identities.createIdentity({ id: 'userA' })

  // MemoryStorage is the default storage for Log but defining them here
  // in case we want to benchmark different storage modules
  const entryStorage = await MemoryStorage()
  const headsStorage = await MemoryStorage()
  // Test LRUStorage
  // const entryStorage = await LRUStorage()
  // const headsStorage = await LRUStorage()
  // Test LevelStorage
  // const entryStorage = await LevelStorage({ path: './logA/entries' })
  // const headsStorage = await LevelStorage({ path: './logA/heads' })

  const log = await Log(testIdentity, { logId: 'A', entryStorage, headsStorage })

  const entryCount = 10000

  console.log(`Append ${entryCount} entries`)

  const startTime1 = Date.now()
  for (let i = 0; i < entryCount; i++) {
    await log.append(i.toString(), { pointerCount: 0 })
  }
  logMetrics('Appending', entryCount, Date.now() - startTime1)

  console.log(`Iterate ${entryCount} entries`)
  const startTime2 = Date.now()
  const all = []
  for await (const entry of log.iterator()) {
    // push + reverse instead of unshift(): unshift shifts the whole array
    // on every insert, making the loop accidentally O(n^2).
    all.push(entry)
  }
  all.reverse()
  logMetrics('Iterating', all.length, Date.now() - startTime2)

  // Clean up benchmark state before exiting
  await rmrf('./orbitdb')

  process.exit(0)
})()
|
||||
87
benchmarks/orbitdb-keyvalue.js
Normal file
87
benchmarks/orbitdb-keyvalue.js
Normal file
@@ -0,0 +1,87 @@
|
||||
import { OrbitDB } from '../src/index.js'
import rmrf from 'rimraf'
import * as IPFS from 'ipfs'

import { EventEmitter } from 'events'

// The benchmark attaches many listeners concurrently; raise the limit so
// Node doesn't spam MaxListenersExceededWarning during the run.
EventEmitter.defaultMaxListeners = 10000

// Local in-process IPFS node: no preload, pubsub enabled, ephemeral ports,
// and all peer discovery disabled so the benchmark runs in isolation.
const ipfsConfig = {
  preload: {
    enabled: false
  },
  EXPERIMENTAL: {
    pubsub: true
  },
  config: {
    Addresses: {
      API: '/ip4/127.0.0.1/tcp/0',
      Swarm: ['/ip4/0.0.0.0/tcp/0'],
      Gateway: '/ip4/0.0.0.0/tcp/0'
    },
    Bootstrap: [],
    Discovery: {
      MDNS: {
        Enabled: false,
        Interval: 0
      },
      webRTCStar: {
        Enabled: false
      }
    }
  }
}

// Compute and print throughput metrics for `count` operations of kind
// `noun` that took `duration` milliseconds. Math.max guards against a
// division by zero (Infinity ops/s) on sub-millisecond runs.
const logMetrics = (verb, noun, count, duration) => {
  const operationsPerSecond = Math.floor(count / (Math.max(duration, 1) / 1000))
  const millisecondsPerOp = duration / count
  console.log(`${verb} ${count} ${noun} took ${duration} ms, ${operationsPerSecond} ops/s, ${millisecondsPerOp} ms/op`)
}

;(async () => {
  console.log('Starting benchmark...')

  const entryCount = 1000

  // Remove state left over from a previous run
  await rmrf('./ipfs')
  await rmrf('./orbitdb')

  const ipfs = await IPFS.create({ ...ipfsConfig, repo: './ipfs' })
  const orbitdb = await OrbitDB({ ipfs })

  console.log(`Set ${entryCount} keys/values`)

  const db1 = await orbitdb.open('benchmark-keyvalue', { type: 'keyvalue' })

  const startTime1 = Date.now()
  for (let i = 0; i < entryCount; i++) {
    await db1.set(i.toString(), 'hello' + i)
  }
  logMetrics('Setting', 'key/values', entryCount, Date.now() - startTime1)

  console.log(`Iterate ${entryCount} key/values`)
  const startTime2 = Date.now()

  const all = []
  for await (const { key, value } of db1.iterator()) {
    // push + reverse instead of unshift() to avoid O(n^2) array shifting
    all.push({ key, value })
  }
  all.reverse()

  logMetrics('Iterating', 'key/values', all.length, Date.now() - startTime2)

  // Tear down the database and nodes, then clean up on-disk state
  await db1.drop()
  await db1.close()

  await orbitdb.stop()
  await ipfs.stop()

  await rmrf('./ipfs')
  await rmrf('./orbitdb')

  process.exit(0)
})()
|
||||
111
benchmarks/orbitdb-replicate.js
Normal file
111
benchmarks/orbitdb-replicate.js
Normal file
@@ -0,0 +1,111 @@
|
||||
import { OrbitDB } from '../src/index.js'
import rmrf from 'rimraf'
import * as IPFS from 'ipfs'
import connectPeers from '../test/utils/connect-nodes.js'
import waitFor from '../test/utils/wait-for.js'

import { EventEmitter } from 'events'

// The benchmark attaches many listeners concurrently; raise the limit so
// Node doesn't spam MaxListenersExceededWarning during the run.
EventEmitter.defaultMaxListeners = 10000

// Local in-process IPFS nodes: no preload, pubsub enabled, ephemeral ports.
// MDNS stays enabled here so the two local peers can discover each other.
const ipfsConfig = {
  preload: {
    enabled: false
  },
  EXPERIMENTAL: {
    pubsub: true
  },
  config: {
    Addresses: {
      API: '/ip4/127.0.0.1/tcp/0',
      Swarm: ['/ip4/0.0.0.0/tcp/0'],
      Gateway: '/ip4/0.0.0.0/tcp/0'
    },
    Bootstrap: [],
    Discovery: {
      MDNS: {
        Enabled: true,
        Interval: 0
      },
      webRTCStar: {
        Enabled: false
      }
    }
  }
}

// Compute and print throughput metrics for `count` operations of kind
// `noun` that took `duration` milliseconds. Math.max guards against a
// division by zero (Infinity ops/s) on sub-millisecond runs.
const logMetrics = (verb, noun, count, duration) => {
  const operationsPerSecond = Math.floor(count / (Math.max(duration, 1) / 1000))
  const millisecondsPerOp = duration / count
  console.log(`${verb} ${count} ${noun} took ${duration} ms, ${operationsPerSecond} ops/s, ${millisecondsPerOp} ms/op`)
}

;(async () => {
  console.log('Starting benchmark...')

  const entryCount = 1000

  // Remove state left over from a previous run
  await rmrf('./ipfs1')
  await rmrf('./ipfs2')
  await rmrf('./orbitdb1')
  await rmrf('./orbitdb2')

  const ipfs1 = await IPFS.create({ ...ipfsConfig, repo: './ipfs1' })
  const ipfs2 = await IPFS.create({ ...ipfsConfig, repo: './ipfs2' })
  const orbitdb1 = await OrbitDB({ ipfs: ipfs1, directory: './orbitdb1' })
  const orbitdb2 = await OrbitDB({ ipfs: ipfs2, directory: './orbitdb2' })

  await connectPeers(ipfs1, ipfs2)

  console.log(`Add ${entryCount} events`)

  const db1 = await orbitdb1.open('benchmark-replication', { type: 'events' })

  const startTime1 = Date.now()
  for (let i = 0; i < entryCount; i++) {
    await db1.add('hello' + i)
  }
  logMetrics('Adding', 'events', entryCount, Date.now() - startTime1)

  // Open the same database on the second peer and block until it reports
  // having joined the replication swarm before reading entries back.
  const db2 = await orbitdb2.open(db1.address)

  let connected = false
  const onJoin = async (peerId) => (connected = true)
  db2.events.on('join', onJoin)
  await waitFor(() => connected, () => true)

  console.log(`Iterate ${entryCount} events`)
  const startTime2 = Date.now()

  const all = []
  for await (const { value } of db2.iterator()) {
    // push + reverse instead of unshift() to avoid O(n^2) array shifting
    all.push(value)
  }
  all.reverse()

  logMetrics('Iterating', 'events', all.length, Date.now() - startTime2)

  // Tear down both databases and nodes, then clean up on-disk state
  await db1.drop()
  await db1.close()
  await db2.drop()
  await db2.close()

  await orbitdb1.stop()
  await orbitdb2.stop()
  await ipfs1.stop()
  await ipfs2.stop()

  await rmrf('./ipfs1')
  await rmrf('./ipfs2')
  await rmrf('./orbitdb1')
  await rmrf('./orbitdb2')

  process.exit(0)
})()
|
||||
@@ -75,7 +75,6 @@
|
||||
],
|
||||
"ignore": [
|
||||
"examples/**",
|
||||
"benchmarks/**",
|
||||
"test/fixtures/**",
|
||||
"test/browser/**"
|
||||
]
|
||||
|
||||
@@ -57,7 +57,7 @@ const OrbitDB = async ({ ipfs, id, identity, keystore, directory } = {}) => {
|
||||
|
||||
let databases = {}
|
||||
|
||||
const open = async (address, { type, meta, write, Store } = {}) => {
|
||||
const open = async (address, { type, meta, write, sync, Store } = {}) => {
|
||||
let name, manifest, accessController
|
||||
|
||||
if (type && !databaseTypes[type]) {
|
||||
@@ -97,7 +97,7 @@ const OrbitDB = async ({ ipfs, id, identity, keystore, directory } = {}) => {
|
||||
throw new Error(`Unspported database type: '${type}'`)
|
||||
}
|
||||
|
||||
const db = await DatabaseModel({ OpLog, Database, ipfs, identity, address: address.toString(), name, accessController, directory, meta })
|
||||
const db = await DatabaseModel({ OpLog, Database, ipfs, identity, address: address.toString(), name, accessController, directory, meta, syncAutomatically: isDefined(sync) ? sync : true })
|
||||
|
||||
db.events.on('close', onDatabaseClosed(address.toString()))
|
||||
|
||||
|
||||
@@ -7,7 +7,7 @@ import { ComposedStorage, LRUStorage, IPFSBlockStorage, LevelStorage } from './s
|
||||
const defaultPointerCount = 0
|
||||
const defaultCacheSize = 1000
|
||||
|
||||
const Database = async ({ OpLog, ipfs, identity, address, name, accessController, directory, meta, headsStorage, entryStorage, pointerCount }) => {
|
||||
const Database = async ({ OpLog, ipfs, identity, address, name, accessController, directory, meta, headsStorage, entryStorage, pointerCount, syncAutomatically }) => {
|
||||
const { Log, Entry } = OpLog
|
||||
|
||||
directory = Path.join(directory || './orbitdb', `./${address}/`)
|
||||
@@ -73,7 +73,7 @@ const Database = async ({ OpLog, ipfs, identity, address, name, accessController
|
||||
// Start the Sync protocol
|
||||
// Sync protocol exchanges OpLog heads (latest known entries) between peers when they connect
|
||||
// Sync emits 'join', 'leave' and 'error' events through the given event emitter
|
||||
const sync = await Sync({ ipfs, log, events, onSynced: applyOperation })
|
||||
const sync = await Sync({ ipfs, log, events, onSynced: applyOperation, start: syncAutomatically })
|
||||
|
||||
return {
|
||||
address,
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
const DocumentStore = async ({ OpLog, Database, ipfs, identity, address, name, accessController, directory, storage, meta, indexBy = '_id' }) => {
|
||||
const database = await Database({ OpLog, ipfs, identity, address, name, accessController, directory, storage, meta })
|
||||
const DocumentStore = async ({ OpLog, Database, ipfs, identity, address, name, accessController, directory, storage, meta, syncAutomatically, indexBy = '_id' }) => {
|
||||
const database = await Database({ OpLog, ipfs, identity, address, name, accessController, directory, storage, meta, syncAutomatically })
|
||||
|
||||
const { addOperation, log } = database
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
const Events = async ({ OpLog, Database, ipfs, identity, address, name, accessController, directory, storage, meta }) => {
|
||||
const database = await Database({ OpLog, ipfs, identity, address, name, accessController, directory, storage, meta })
|
||||
const Events = async ({ OpLog, Database, ipfs, identity, address, name, accessController, directory, storage, meta, syncAutomatically }) => {
|
||||
const database = await Database({ OpLog, ipfs, identity, address, name, accessController, directory, storage, meta, syncAutomatically })
|
||||
|
||||
const { addOperation, log } = database
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
const KeyValue = async ({ OpLog, Database, ipfs, identity, address, name, accessController, directory, storage, meta }) => {
|
||||
const database = await Database({ OpLog, ipfs, identity, address, name, accessController, directory, storage, meta })
|
||||
const KeyValue = async ({ OpLog, Database, ipfs, identity, address, name, accessController, directory, storage, meta, syncAutomatically }) => {
|
||||
const database = await Database({ OpLog, ipfs, identity, address, name, accessController, directory, storage, meta, syncAutomatically })
|
||||
|
||||
const { addOperation, log } = database
|
||||
|
||||
|
||||
20
src/sync.js
20
src/sync.js
@@ -50,6 +50,8 @@ const Sync = async ({ ipfs, log, events, onSynced, start }) => {
|
||||
|
||||
events = events || new EventEmitter()
|
||||
|
||||
let started = false
|
||||
|
||||
const onPeerJoined = async (peerId) => {
|
||||
const heads = await log.heads()
|
||||
events.emit('join', peerId, heads)
|
||||
@@ -134,15 +136,20 @@ const Sync = async ({ ipfs, log, events, onSynced, start }) => {
|
||||
}
|
||||
|
||||
const add = async (entry) => {
|
||||
await ipfs.pubsub.publish(address, entry.bytes)
|
||||
if (started) {
|
||||
await ipfs.pubsub.publish(address, entry.bytes)
|
||||
}
|
||||
}
|
||||
|
||||
const stopSync = async () => {
|
||||
await queue.onIdle()
|
||||
ipfs.libp2p.pubsub.removeEventListener('subscription-change', handlePeerSubscribed)
|
||||
await ipfs.libp2p.unhandle(headsSyncAddress)
|
||||
await ipfs.pubsub.unsubscribe(address, handleUpdateMessage)
|
||||
peers.clear()
|
||||
if (started) {
|
||||
await queue.onIdle()
|
||||
ipfs.libp2p.pubsub.removeEventListener('subscription-change', handlePeerSubscribed)
|
||||
await ipfs.libp2p.unhandle(headsSyncAddress)
|
||||
await ipfs.pubsub.unsubscribe(address, handleUpdateMessage)
|
||||
peers.clear()
|
||||
started = false
|
||||
}
|
||||
}
|
||||
|
||||
const startSync = async () => {
|
||||
@@ -151,6 +158,7 @@ const Sync = async ({ ipfs, log, events, onSynced, start }) => {
|
||||
ipfs.libp2p.pubsub.addEventListener('subscription-change', handlePeerSubscribed)
|
||||
// Subscribe to the pubsub channel for this database through which updates are sent
|
||||
await ipfs.pubsub.subscribe(address, handleUpdateMessage)
|
||||
started = true
|
||||
}
|
||||
|
||||
// Start Sync automatically
|
||||
|
||||
Reference in New Issue
Block a user