Refactor OrbitDB

Fix sync
Fix linter
Fix tests
Clean up
Set default references count to 0
Fix sync
Use address instead of databaseId
Sync protocol
Keep references to open databases in OrbitDB
Fix append benchmark
Initial version of heads exchange
Remove Feed
Fix KeyValuePersisted iterator
Refactor OrbitDBAddress a bit more
Add rest of the database types
Refactor OrbitDB addresses
Initial version for the full circle
Initial structure and tests for new OrbitDB
Make sure KeyStore is open when a Database is created
Re-organize OrbitDB
Use new databases and Log
More clean up
Add 'drop' event to Database
Clean up OrbitDB
Remove id from OrbitDB
Use new KeyStore and Identities
Remove storage from OrbitDB
Remove migrations from OrbitDB
Remove caches from OrbitDB
Remove pubsub from OrbitDB
This commit is contained in:
haad
2023-03-01 09:12:13 +02:00
parent dd1f27713f
commit a063b3fb4a
74 changed files with 6947 additions and 22396 deletions

View File

@@ -4,7 +4,7 @@ deps:
npm install
test: deps
npm run test:all -- --exit
npm run test -- --exit
build: test
mkdir -p examples/browser/lib/

View File

@@ -1,5 +1,6 @@
import IdentityProvider from 'orbit-db-identity-provider'
import { Log, MemoryStorage } from '../src/log.js'
import { Identities } from '../src/index.js'
import { Log } from '../src/index.js'
import { MemoryStorage, LevelStorage } from '../src/storage/index.js'
// State
let log
@@ -21,13 +22,18 @@ const queryLoop = async () => {
;(async () => {
console.log('Starting benchmark...')
const identity = await IdentityProvider.createIdentity({ id: 'userA' })
// MemoeryStorage is the default storage for Log but defining them here
// in case we want to benchmark different storage modules
const storage = await MemoryStorage()
const stateStorage = await MemoryStorage()
const identities = await Identities()
const testIdentity = await identities.createIdentity({ id: 'userA' })
log = await Log(identity, { logId: 'A', storage, stateStorage })
// MemoryStorage is the default storage for Log but defining them here
// in case we want to benchmark different storage modules
const entryStorage = await MemoryStorage()
const headsStorage = await MemoryStorage()
// Test LevelStorage
// const entryStorage = await LevelStorage({ path: './logA/entries' })
// const headsStorage = await LevelStorage({ path: './logA/heads' })
log = await Log(testIdentity, { logId: 'A', entryStorage, headsStorage })
// Output metrics at 1 second interval
setInterval(() => {
@@ -37,7 +43,7 @@ const queryLoop = async () => {
if (lastTenSeconds === 0) throw new Error('Problems!')
lastTenSeconds = 0
}
console.log(`${queriesPerSecond} queries per second, ${totalQueries} queries in ${seconds} seconds (Entry count: ${log.values.length})`)
console.log(`${queriesPerSecond} queries per second, ${totalQueries} queries in ${seconds} seconds`)
queriesPerSecond = 0
}, 1000)

View File

@@ -20,6 +20,9 @@ export default (env, argv) => {
}
},
target: 'web',
experiments: {
topLevelAwait: true
},
externals: {
fs: '{ existsSync: () => true }',
mkdirp: '{}'

View File

@@ -21,6 +21,9 @@ export default (env, argv) => {
},
target: 'web',
devtool: 'source-map',
experiments: {
topLevelAwait: true
},
externals: {
fs: '{ existsSync: () => true }',
mkdirp: '{}'

View File

@@ -13,15 +13,15 @@ export default (env, argv) => {
target: 'web',
mode: 'development',
devtool: 'source-map',
experiments: {
topLevelAwait: true
},
plugins: [
new webpack.ProvidePlugin({
process: 'process/browser.js',
Buffer: ['buffer', 'Buffer']
})
],
experiments: {
topLevelAwait: true
},
resolve: {
modules: [
'node_modules'

1
dist/ipfslog.min.js vendored

File diff suppressed because one or more lines are too long

25170
package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@@ -18,62 +18,38 @@
"type": "module",
"main": "src/OrbitDB.js",
"dependencies": {
"ipfs-pubsub-1on1": "^0.1.0",
"is-node": "^1.0.2",
"@ipld/dag-cbor": "^9.0.0",
"dids": "^4.0.0",
"it-pipe": "^2.0.5",
"level": "^8.0.0",
"logplease": "^1.2.15",
"orbit-db-access-controllers": "^0.4.0",
"orbit-db-cache": "^0.5.0",
"orbit-db-counterstore": "^2.0.0",
"orbit-db-docstore": "^2.0.0",
"orbit-db-eventstore": "^2.0.0",
"orbit-db-feedstore": "^2.0.0",
"orbit-db-identity-provider": "^0.5.0",
"orbit-db-io": "^3.0.0",
"orbit-db-keystore": "^2.0.0",
"orbit-db-kvstore": "^2.0.0",
"orbit-db-pubsub": "^0.7.0",
"orbit-db-storage-adapter": "^0.9.0",
"orbit-db-store": "^5.0.0",
"lru": "^3.1.0",
"multiformats": "^11.0.1",
"p-queue": "^7.3.4",
"secp256k1": "^5.0.0",
"wherearewe": "^2.0.1"
},
"devDependencies": {
"adm-zip": "^0.4.16",
"babel-cli": "^6.26.0",
"babel-core": "^6.26.3",
"babel-loader": "^9.1.2",
"babel-plugin-transform-runtime": "^6.23.0",
"babel-polyfill": "^6.26.0",
"babel-preset-env": "^1.7.0",
"chromium": "^3.0.3",
"cpy-cli": "^4.2.0",
"cross-env": "^7.0.3",
"eslint": "^8.32.0",
"fs-extra": "^11.1.0",
"ganache-cli": "^6.12.2",
"go-ipfs": "^0.17.0",
"ipfs": "^0.66.0",
"ipfsd-ctl": "^13.0.0",
"it-all": "^2.0.0",
"key-did-provider-ed25519": "^2.0.1",
"key-did-resolver": "^2.3.0",
"markdown-toc": "^1.2.0",
"mkdirp": "^2.1.1",
"mocha": "^10.2.0",
"node-pre-gyp": "^0.13.0",
"open-cli": "^7.1.0",
"orbit-db-test-utils": "^3.0.0",
"p-each-series": "^3.0.0",
"p-map": "^5.5.0",
"p-map-series": "^3.0.0",
"p-whilst": "^3.0.0",
"pify": "^6.1.0",
"path-browserify": "^1.0.1",
"puppeteer-core": "^19.5.2",
"remark-cli": "^11.0.0",
"remark-validate-links": "^12.1.0",
"rimraf": "^4.1.0",
"standard": "^17.0.0",
"validate-maintainers": "^1.2.2",
"web3": "^1.8.1",
"webpack": "^5.75.0",
"webpack-cli": "^5.0.1"
},
@@ -85,7 +61,7 @@
"lint:docs": "remark -qf -u validate-links .",
"test:all": "npm run test:browser-multiple-tabs && npm run test",
"test": "cross-env TEST=js mocha --config test/.mocharc.json",
"test:browser-multiple-tabs": "npm run build:dist && cpy dist/orbitdb.min.js ./test/browser/ --rename=orbitdb.js --flat && cpy node_modules/ipfs/dist/index.min.js ./test/browser --rename=ipfs.js --flat && cpy node_modules/orbit-db-identity-provider/dist/orbit-db-identity-provider.min.js ./test/browser --rename=identities.js --flat && cpy node_modules/ipfs-log/dist/ipfslog.min.js ./test/browser --flat && mocha ./test/browser/concurrent.spec.js",
"test:browser-multiple-tabs": "npm run build:dist && cpy dist/orbitdb.min.js ./test/browser/ --rename=orbitdb.js --flat && cpy node_modules/ipfs/dist/index.min.js ./test/browser --rename=ipfs.js --flat && mocha ./test/browser/concurrent.spec.js",
"build": "npm run build:dist && npm run build:debug",
"build:examples": "webpack --config conf/webpack.example.config.js",
"build:dist": "webpack --config conf/webpack.config.js",
@@ -96,10 +72,13 @@
"lint:fix": "standard --fix"
},
"standard": {
"env": "mocha",
"env": [ "mocha" ],
"ignore": [
"examples/**",
"benchmarks/**"
"benchmarks/**",
"test/fixtures/**",
"test/browser/**",
"test/access-controllers/**"
]
},
"localMaintainers": [

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,70 @@
import AccessController from './interface.js'
import AccessControllerManifest from './manifest.js'
// import LegacyIPFSAccessController from './access-controllers/legacy-ipfs.js'
import IPFSAccessController from './ipfs.js'
// import OrbitDBAccessController from './orbitdb.js'
// Registry of access-controller implementations keyed by type string.
// Additional types can be registered at runtime through
// AccessControllers.addAccessController.
const supportedTypes = {
  // 'legacy-ipfs': LegacyIPFSAccessController,
  ipfs: IPFSAccessController
  // orbitdb: OrbitDBAccessController
}

// Look up the implementation registered for `type`; throws for unknown types.
const getHandlerFor = (type) => {
  if (AccessControllers.isSupported(type)) {
    return supportedTypes[type]
  }
  throw new Error(`AccessController type '${type}' is not supported`)
}
/**
 * Static registry/facade for OrbitDB access controllers.
 *
 * Supports querying, registering and removing controller types, and
 * creating/resolving the manifests that describe controller instances.
 */
export default class AccessControllers {
  /* Expose the base AccessController interface class */
  static get AccessController () { return AccessController }

  /* True when an implementation is registered for `type` */
  static isSupported (type) {
    return Object.keys(supportedTypes).includes(type)
  }

  /* Register a controller class; it must expose a static string `type` */
  static addAccessController (options) {
    const ControllerClass = options.AccessController
    if (!ControllerClass) {
      throw new Error('AccessController class needs to be given as an option')
    }
    if (!ControllerClass.type ||
      typeof ControllerClass.type !== 'string') {
      throw new Error('Given AccessController class needs to implement: static get type() { /* return a string */}.')
    }
    supportedTypes[ControllerClass.type] = ControllerClass
  }

  /* Register several controller classes at once */
  static addAccessControllers (options) {
    const controllers = options.AccessControllers
    if (!controllers) {
      throw new Error('AccessController classes need to be given as an option')
    }
    for (const ControllerClass of controllers) {
      AccessControllers.addAccessController({ AccessController: ControllerClass })
    }
  }

  /* Unregister a controller type */
  static removeAccessController (type) {
    delete supportedTypes[type]
  }

  /* Resolve a controller instance from a stored manifest address */
  static async resolve (orbitdb, manifestAddress, options = {}) {
    const { type, params } = await AccessControllerManifest.resolve(orbitdb._ipfs, manifestAddress, options)
    const ControllerClass = getHandlerFor(type)
    const accessController = await ControllerClass.create(orbitdb, { ...options, ...params })
    await accessController.load(params.address)
    return accessController
  }

  /* Create a controller of `type`, persist its manifest, return the hash */
  static async create ({ ipfs, identity }, type, options = {}) {
    const ControllerClass = getHandlerFor(type)
    const controller = await ControllerClass.create({ ipfs, identity }, options)
    const params = await controller.save()
    return AccessControllerManifest.create(ipfs, type, params)
  }
}

View File

@@ -0,0 +1,50 @@
import { EventEmitter } from 'events'
/**
* Interface for OrbitDB Access Controllers
*
* Any OrbitDB access controller needs to define and implement
* the methods defined by the interface here.
*/
/**
 * Base interface for OrbitDB access controllers.
 *
 * Concrete access controllers extend this class and implement the methods
 * below. Extends EventEmitter so implementations can emit lifecycle events
 * (e.g. 'updated').
 */
export default class AccessController extends EventEmitter {
  /** Factory: every controller creates instances through this method. */
  static async create (orbitdb, options) {}

  /** Unique type string for this controller; must be overridden. */
  static get type () {
    throw new Error('\'static get type ()\' needs to be defined in the inheriting class')
  }

  /**
   * Type of this controller instance. The only member of the interface
   * that must NOT be overridden; it delegates to the static getter.
   */
  get type () {
    return this.constructor.type
  }

  /** Address this controller is anchored to. */
  get address () {}

  /**
   * Called by the database (the log) to decide whether `entry` may be
   * appended. Return true to allow, false to deny.
   */
  async canAppend (entry, identityProvider) {}

  /** Grant `access` capability to `identity`. Default: no-op, denied. */
  async grant (access, identity) { return false }

  /** Revoke `access` capability from `identity`. Default: no-op, denied. */
  async revoke (access, identity) { return false }

  /** Initialize the controller from `address`. */
  async load (address) {}

  /** Return the parameters to store in the controller's manifest. */
  async save () {}

  /** Release resources when the database owning this controller closes. */
  async close () {}
}

View File

@@ -0,0 +1,115 @@
// import * as io from '../utils/index.js'
// import AccessController from './interface.js'
// import AccessControllerManifest from './manifest.js'
import { IPFSBlockStorage } from '../storage/index.js'
import * as Block from 'multiformats/block'
import * as dagCbor from '@ipld/dag-cbor'
import { sha256 } from 'multiformats/hashes/sha2'
import { base58btc } from 'multiformats/bases/base58'
// Manifests are encoded as dag-cbor blocks, hashed with sha2-256, and
// addressed by their CID rendered as a base58btc string
const codec = dagCbor
const hasher = sha256
const hashStringEncoding = base58btc
// Type tag recorded in the manifest for this access controller
const type = 'ipfs'
// Serialize an access-controller manifest ({ type, ...params }) to a
// dag-cbor block, persist it in `storage` under its base58btc-encoded CID,
// and return that hash string.
const AccessControllerManifest = async ({ storage, type, params }) => {
  const value = { type, ...params }
  const block = await Block.encode({ value, codec, hasher })
  const hash = block.cid.toString(hashStringEncoding)
  await storage.put(hash, block.bytes)
  return hash
}
/**
 * IPFS access controller.
 *
 * Stores an immutable write-access list in a dag-cbor manifest block.
 * When an existing manifest `address` is given, its write list is loaded
 * and overrides any `write` option passed in; otherwise a new manifest is
 * created from `write` (defaulting to the creator's identity id).
 *
 * Fix: the manifest-creation call was duplicated in both branches of the
 * `if (address)` conditional; it is now hoisted after it, with identical
 * behavior in both cases.
 *
 * @returns {Object} { address, write, canAppend }
 */
const IPFSAccessController = async ({ ipfs, identities, identity, address, storage, write }) => {
  storage = storage || await IPFSBlockStorage({ ipfs, pin: true })
  write = write || [identity.id]

  if (address) {
    // Load the existing manifest and adopt its write access list,
    // overriding any `write` option that was passed in
    const manifestBytes = await storage.get(address)
    const { value } = await Block.decode({ bytes: manifestBytes, codec, hasher })
    write = value.write
  }
  // (Re)create the manifest so `address` always refers to the canonical
  // manifest for the effective write list
  address = await AccessControllerManifest({ storage, type, params: { write } })

  /**
   * Return true if the entry's writer is in the write list (or the list
   * contains '*') and the writer's identity verifies.
   */
  const canAppend = async (entry) => {
    const writerIdentity = await identities.getIdentity(entry.identity)
    if (!writerIdentity) {
      return false
    }
    const { id } = writerIdentity
    // Allow if the write access list contain the writer's id or is '*'
    if (write.includes(id) || write.includes('*')) {
      // Check that the identity is valid
      return identities.verifyIdentity(writerIdentity)
    }
    return false
  }

  return {
    address,
    write,
    canAppend
  }
}
export { IPFSAccessController as default }
// constructor (ipfs, options) {
// super()
// this._ipfs = ipfs
// this._write = Array.from(options.write || [])
// }
// // Returns the type of the access controller
// static get type () { return type }
// // Return a Set of keys that have `access` capability
// get write () {
// return this._write
// }
// async canAppend (entry, identityProvider) {
// // Allow if access list contain the writer's publicKey or is '*'
// const key = entry.identity.id
// if (this.write.includes(key) || this.write.includes('*')) {
// // check identity is valid
// return identityProvider.verifyIdentity(entry.identity)
// }
// return false
// }
// async load (address) {
// // Transform '/ipfs/QmPFtHi3cmfZerxtH9ySLdzpg1yFhocYDZgEZywdUXHxFU'
// // to 'QmPFtHi3cmfZerxtH9ySLdzpg1yFhocYDZgEZywdUXHxFU'
// if (address.indexOf('/ipfs') === 0) { address = address.split('/')[2] }
// try {
// this._write = await io.read(this._ipfs, address)
// } catch (e) {
// console.log('IPFSAccessController.load ERROR:', e)
// }
// }
// async save ({ ipfs }) {
// let cid
// try {
// cid = await io.write(this._ipfs, 'dag-cbor', { write: JSON.stringify(this.write, null, 2) })
// } catch (e) {
// console.log('IPFSAccessController.save ERROR:', e)
// }
// // return the manifest data
// return { address: cid }
// }
// static async create ({ ipfs, identity }, options = {}) {
// options = { ...options, ...{ write: options.write || [identity.id] } }
// return new IPFSAccessController(ipfs, options)
// }
// }

View File

@@ -0,0 +1,33 @@
import * as io from 'orbit-db-io'
/**
 * Manifest describing an access controller: its type string plus the
 * parameters needed to instantiate it. Persisted in IPFS as dag-cbor.
 */
export default class AccessControllerManifest {
  constructor (type, params = {}) {
    this.type = type
    this.params = params
  }

  /**
   * Resolve a manifest from IPFS by hash. When options.skipManifest is set,
   * no lookup is performed: the hash is treated as the controller address
   * itself and options.type is required.
   */
  static async resolve (ipfs, manifestHash, options = {}) {
    if (options.skipManifest) {
      if (!options.type) {
        throw new Error('No manifest, access-controller type required')
      }
      return new AccessControllerManifest(options.type, { address: manifestHash })
    }
    // TODO: ensure this is a valid multihash
    const hash = manifestHash.startsWith('/ipfs')
      ? manifestHash.split('/')[2]
      : manifestHash
    const { type, params } = await io.read(ipfs, hash)
    return new AccessControllerManifest(type, params)
  }

  /**
   * Write a manifest to IPFS and return its hash. When params.skipManifest
   * is set, returns params.address unchanged instead of writing anything.
   */
  static async create (ipfs, type, params) {
    if (params.skipManifest) {
      return params.address
    }
    return io.write(ipfs, 'dag-cbor', { type, params })
  }
}

View File

@@ -0,0 +1,134 @@
import pMapSeries from 'p-map-series'
import AccessController from './interface.js'
import ensureAddress from '../utils/ensure-ac-address.js'
const type = 'orbitdb'
/**
 * OrbitDB access controller.
 *
 * Stores capability lists ('write', 'admin', ...) in a dedicated OrbitDB
 * key-value database so that access rights can be granted and revoked
 * dynamically. The backing database itself is protected by an immutable
 * 'ipfs' access controller whose write list acts as the admin set here.
 * Emits 'updated' whenever the backing database changes.
 */
export default class OrbitDBAccessController extends AccessController {
  constructor (orbitdb, options) {
    super()
    this._orbitdb = orbitdb
    this._db = null
    this._options = options || {}
  }

  // Returns the type of the access controller
  static get type () { return type }

  // Returns the address of the OrbitDB key-value database used as the ACL
  get address () {
    return this._db.address
  }

  // Return true if entry is allowed to be added to the database
  async canAppend (entry, identityProvider) {
    // Holders of either 'write' or 'admin' capability may append
    const access = new Set([...this.get('write'), ...this.get('admin')])
    // If the ACL contains the writer's public key or it contains '*'
    if (access.has(entry.identity.id) || access.has('*')) {
      const verifiedIdentity = await identityProvider.verifyIdentity(entry.identity)
      // Allow access if identity verifies
      return verifiedIdentity
    }
    return false
  }

  // Current capability map: capability name -> Set of key ids, built from
  // the backing database's index with the root controller's write list
  // merged in as 'admin'
  get capabilities () {
    if (this._db) {
      const capabilities = this._db.index
      const toSet = (e) => {
        const key = e[0]
        capabilities[key] = new Set([...(capabilities[key] || []), ...e[1]])
      }
      // Merge with the access controller of the database
      // and make sure all values are Sets
      Object.entries({
        ...capabilities,
        // Add the root access controller's 'write' access list
        // as admins on this controller
        ...{ admin: new Set([...(capabilities.admin || []), ...this._db.access.write]) }
      }).forEach(toSet)
      return capabilities
    }
    return {}
  }

  // Set of key ids holding the given capability (empty Set when none)
  get (capability) {
    return this.capabilities[capability] || new Set([])
  }

  async close () {
    await this._db.close()
  }

  // Open (or re-open) the backing ACL database at the given address
  async load (address) {
    if (this._db) { await this._db.close() }
    // Force '<address>/_access' naming for the database
    this._db = await this._orbitdb.keyvalue(ensureAddress(address), {
      // use ipfs controller as an immutable "root controller"
      accessController: {
        type: 'ipfs',
        write: this._options.admin || [this._orbitdb.identity.id]
      },
      sync: true
    })
    // Re-emit 'updated' on any local or replicated change to the ACL
    this._db.events.on('ready', this._onUpdate.bind(this))
    this._db.events.on('write', this._onUpdate.bind(this))
    this._db.events.on('replicated', this._onUpdate.bind(this))
    await this._db.load()
  }

  async save () {
    // return the manifest data
    return {
      address: this._db.address.toString()
    }
  }

  // True if identity holds the capability, or it is granted to '*'
  async hasCapability (capability, identity) {
    // Write keys and admins keys are allowed
    const access = new Set(this.get(capability))
    return access.has(identity.id) || access.has('*')
  }

  async grant (capability, key) {
    // Merge current keys with the new key
    // NOTE(review): assumes this._db.get() is synchronous — confirm against
    // the key-value store implementation in use
    const capabilities = new Set([...(this._db.get(capability) || []), ...[key]])
    await this._db.put(capability, Array.from(capabilities.values()))
  }

  async revoke (capability, key) {
    const capabilities = new Set(this._db.get(capability) || [])
    capabilities.delete(key)
    if (capabilities.size > 0) {
      await this._db.put(capability, Array.from(capabilities.values()))
    } else {
      // No holders left: remove the capability entry entirely
      await this._db.del(capability)
    }
  }

  /* Private methods */
  _onUpdate () {
    this.emit('updated')
  }

  /* Factory */
  static async create (orbitdb, options = {}) {
    const ac = new OrbitDBAccessController(orbitdb, options)
    await ac.load(options.address || options.name || 'default-access-controller')

    // Add write access from options, but only when creating a fresh ACL
    // (not when loading an existing one by address)
    if (options.write && !options.address) {
      await pMapSeries(options.write, async (e) => ac.grant('write', e))
    }
    return ac
  }
}

View File

@@ -1,16 +1,29 @@
import path from 'path'
import * as io from 'orbit-db-io'
// import * as io from 'orbit-db-io'
import * as Block from 'multiformats/block'
import * as dagCbor from '@ipld/dag-cbor'
import { sha256 } from 'multiformats/hashes/sha2'
import { base58btc } from 'multiformats/bases/base58'
const codec = dagCbor
const hasher = sha256
const hashStringEncoding = base58btc
// Creates a DB manifest file and saves it in IPFS
export default async (ipfs, name, type, accessControllerAddress, options) => {
const manifest = Object.assign({
name,
type,
accessController: (path.posix || path).join('/ipfs', accessControllerAddress)
},
// meta field is only added to manifest if options.meta is defined
options.meta !== undefined ? { meta: options.meta } : {}
export default async (storage, name, type, accessControllerAddress, options) => {
const manifest = Object.assign(
{
name,
type,
accessController: (path.posix || path).join('/ipfs', accessControllerAddress)
},
// meta field is only added to manifest if options.meta is defined
options.meta !== undefined ? { meta: options.meta } : {}
)
return io.write(ipfs, options.format || 'dag-cbor', manifest, options)
const { cid, bytes } = await Block.encode({ value: manifest, codec, hasher })
const hash = cid.toString(hashStringEncoding)
await storage.put(hash, bytes)
return { hash, manifest }
}

View File

@@ -1,20 +1,25 @@
import { EventEmitter } from 'events'
import PQueue from 'p-queue'
import Path from 'path'
import Sync from './sync.js'
import { IPFSBlockStorage, LevelStorage } from '../storage/index.js'
const defaultPointerCount = 16
const Database = async ({ OpLog, ipfs, identity, databaseId, accessController, storage, headsStorage, pointerCount }) => {
const { Log, Entry, IPFSBlockStorage, LevelStorage } = OpLog
const Database = async ({ OpLog, ipfs, identity, address, name, accessController, directory, storage, headsStorage, pointerCount }) => {
const { Log, Entry } = OpLog
const entryStorage = storage || await IPFSBlockStorage({ ipfs, pin: true })
headsStorage = headsStorage || await LevelStorage({ path: `./${identity.id}/${databaseId}/log/_heads/` })
// const indexStorage = await LevelStorage({ path: `./${identity.id}/${databaseId}/log/_index/` })
// const log = await Log(identity, { logId: databaseId, access: accessController, entryStorage, headsStorage, indexStorage })
const log = await Log(identity, { logId: databaseId, access: accessController, entryStorage, headsStorage })
directory = Path.join(directory || './orbitdb', `./${address.path}/`)
headsStorage = headsStorage || await LevelStorage({ path: Path.join(directory, '/log/_heads/') })
const log = await Log(identity, { logId: address.toString(), access: accessController, entryStorage, headsStorage })
// const indexStorage = await LevelStorage({ path: Path.join(directory, '/log/_index/') })
// const log = await Log(identity, { logId: address.toString(), access: accessController, entryStorage, headsStorage, indexStorage })
const events = new EventEmitter()
const queue = new PQueue({ concurrency: 1 })
pointerCount = pointerCount || defaultPointerCount
@@ -22,32 +27,17 @@ const Database = async ({ OpLog, ipfs, identity, databaseId, accessController, s
const addOperation = async (op) => {
const task = async () => {
const entry = await log.append(op, { pointerCount })
await ipfs.pubsub.publish(databaseId, entry.bytes)
await syncProtocol.publish(entry)
events.emit('update', entry)
return entry.hash
}
return queue.add(task)
}
const handleMessage = async (message) => {
const { id: peerId } = await ipfs.id()
const messageIsNotFromMe = (message) => String(peerId) !== String(message.from)
const messageHasData = (message) => message.data !== undefined
try {
if (messageIsNotFromMe(message) && messageHasData(message)) {
await sync(message.data)
}
} catch (e) {
console.error(e)
events.emit('error', e)
}
}
const sync = async (bytes) => {
const applyOperation = async (bytes) => {
const task = async () => {
const entry = await Entry.decode(bytes)
if (entry) {
events.emit('sync', entry)
const updated = await log.joinEntry(entry)
if (updated) {
events.emit('update', entry)
@@ -58,7 +48,7 @@ const Database = async ({ OpLog, ipfs, identity, databaseId, accessController, s
}
const close = async () => {
await ipfs.pubsub.unsubscribe(log.id, handleMessage)
await syncProtocol.stop()
await queue.onIdle()
await log.close()
events.emit('close')
@@ -68,22 +58,23 @@ const Database = async ({ OpLog, ipfs, identity, databaseId, accessController, s
const drop = async () => {
await queue.onIdle()
await log.clear()
events.emit('drop')
}
const merge = async (other) => {}
// Automatically subscribe to the pubsub channel for this database
await ipfs.pubsub.subscribe(log.id, handleMessage)
// Start the Sync protocol
// Sync protocol exchanges OpLog heads (latest known entries) between peers when they connect
// Sync emits 'join', 'leave' and 'error' events through the given event emitter
const syncProtocol = await Sync({ ipfs, log, events, sync: applyOperation })
return {
databaseId,
address,
name,
identity,
sync,
merge,
close,
drop,
addOperation,
log,
peers: syncProtocol.peers,
events
}
}

View File

@@ -1,5 +1,5 @@
const DocumentStore = async ({ OpLog, Database, ipfs, identity, databaseId, accessController, storage, indexBy = '_id' }) => {
const database = await Database({ OpLog, ipfs, identity, databaseId, accessController, storage })
const DocumentStore = async ({ OpLog, Database, ipfs, identity, address, name, accessController, directory, storage, indexBy = '_id' }) => {
const database = await Database({ OpLog, ipfs, identity, address, name, accessController, directory, storage })
const { addOperation, log } = database

View File

@@ -1,5 +1,5 @@
const Events = async ({ OpLog, Database, ipfs, identity, databaseId, accessController, storage }) => {
const database = await Database({ OpLog, ipfs, identity, databaseId, accessController, storage })
const Events = async ({ OpLog, Database, ipfs, identity, address, name, accessController, directory, storage }) => {
const database = await Database({ OpLog, ipfs, identity, address, name, accessController, directory, storage })
const { addOperation, log } = database

View File

@@ -1,17 +1,18 @@
import LevelStorage from '../storage/level.js'
import { KeyValue } from './index.js'
import PQueue from 'p-queue'
import path from 'path'
const valueEncoding = 'json'
const KeyValuePersisted = async ({ KeyValue, OpLog, Database, ipfs, identity, databaseId, accessController, storage }) => {
const keyValueStore = await KeyValue({ OpLog, Database, ipfs, identity, databaseId, accessController, storage })
const KeyValuePersisted = async ({ OpLog, Database, ipfs, identity, address, name, accessController, directory, storage }) => {
const keyValueStore = await KeyValue({ OpLog, Database, ipfs, identity, address, name, accessController, directory, storage })
const { events, log } = keyValueStore
const queue = new PQueue({ concurrency: 1 })
const path = `./${identity.id}/${databaseId}/_index`
const index = await LevelStorage({ path, valueEncoding: 'json' })
// await index.open()
directory = path.join(directory || './orbitdb', `./${address.path}/_index/`)
const index = await LevelStorage({ path: directory, valueEncoding })
let latestOplogHash
@@ -49,7 +50,7 @@ const KeyValuePersisted = async ({ KeyValue, OpLog, Database, ipfs, identity, da
const iterator = async function * () {
await queue.onIdle()
for await (const [key, value] of index.iterator()) {
for await (const { key, value } of keyValueStore.iterator()) {
yield { key, value }
}
}

View File

@@ -1,5 +1,5 @@
const KeyValue = async ({ OpLog, Database, ipfs, identity, databaseId, accessController, storage }) => {
const database = await Database({ OpLog, ipfs, identity, databaseId, accessController, storage })
const KeyValue = async ({ OpLog, Database, ipfs, identity, address, name, accessController, directory, storage }) => {
const database = await Database({ OpLog, ipfs, identity, address, name, accessController, directory, storage })
const { addOperation, log } = database

113
src/db/sync.js Normal file
View File

@@ -0,0 +1,113 @@
import { pipe } from 'it-pipe'
import PQueue from 'p-queue'
/**
 * Sync protocol for an oplog.
 *
 * Exchanges the log's current heads (latest known entries) with a peer when
 * it subscribes to the database's pubsub topic, and broadcasts/receives
 * individual updates through that topic afterwards. Emits 'join', 'leave'
 * and 'error' through the given event emitter.
 *
 * Fix: the returned API now exposes `peers` — callers (e.g. Database) read
 * `syncProtocol.peers`, which was previously undefined. `stop()` now clears
 * the set in place instead of reassigning it, so the exposed reference
 * stays valid.
 *
 * @param {Object} params
 * @param {Object} params.ipfs - IPFS/libp2p node used for pubsub and direct streams
 * @param {Object} params.log - Oplog to synchronize; log.id is the pubsub topic
 * @param {EventEmitter} params.events - Emitter for 'join'/'leave'/'error'
 * @param {Function} params.sync - Callback receiving each incoming entry's bytes
 * @returns {Object} { publish, stop, peers }
 */
const Sync = async ({ ipfs, log, events, sync }) => {
  // The log id doubles as the pubsub topic and the protocol name suffix
  const address = log.id
  // Serialize protocol tasks so head exchanges and updates don't interleave
  const queue = new PQueue({ concurrency: 1 })
  // Ids of peers we are currently syncing with (stable, exposed reference)
  const peers = new Set()

  // Apply every head received from a remote peer to the local log
  const receiveHeads = async (source) => {
    for await (const value of source) {
      const headBytes = value.subarray()
      await sync(headBytes)
    }
  }

  // Produce our current heads as a stream of raw entry bytes
  // (the `source` argument required by it-pipe is intentionally unused)
  const sendHeads = async (source) => {
    return (async function * () {
      const heads = await log.heads()
      for await (const { bytes } of heads) {
        yield bytes
      }
    })()
  }

  // Inbound head exchange: a remote peer dialed our protocol handler
  const handleReceiveHeads = async ({ connection, stream }) => {
    peers.add(connection.remotePeer.toString())
    try {
      await pipe(stream, receiveHeads, sendHeads, stream)
      events.emit('join', connection.remotePeer)
    } catch (e) {
      console.error(e)
      events.emit('error', e)
    }
  }

  // Outbound head exchange: dial a peer when it subscribes to our topic
  const handlePeerSubscribed = async (event) => {
    const task = async () => {
      const { peerId, subscriptions } = event.detail
      const subscription = subscriptions.find(e => e.topic === address)
      if (!subscription) {
        return
      }
      if (subscription.subscribe) {
        // Already exchanging with this peer; nothing to do
        if (peers.has(peerId.toString())) {
          return
        }
        try {
          peers.add(peerId.toString())
          const stream = await ipfs.libp2p.dialProtocol(peerId, '/heads' + address)
          await pipe(sendHeads, stream, receiveHeads)
          events.emit('join', peerId)
        } catch (e) {
          if (e.code === 'ERR_UNSUPPORTED_PROTOCOL') {
            // Skip peer, they don't have this database currently
            console.log(e.message)
          } else {
            console.error(e)
            events.emit('error', e)
          }
        }
      } else {
        // Peer unsubscribed from the topic: treat as leaving the database
        peers.delete(peerId.toString())
        events.emit('leave', peerId)
      }
    }
    await queue.onIdle()
    await queue.add(task)
  }

  // Handle a pubsub update: apply entries that came from other peers
  const handleUpdateMessage = async (message) => {
    const task = async () => {
      const { id: peerId } = await ipfs.id()
      const messageIsNotFromMe = (message) => String(peerId) !== String(message.from)
      const messageHasData = (message) => message.data !== undefined
      try {
        if (messageIsNotFromMe(message) && messageHasData(message)) {
          await sync(message.data)
        }
      } catch (e) {
        console.error(e)
        events.emit('error', e)
      }
    }
    await queue.onIdle()
    await queue.add(task)
  }

  // Broadcast a newly appended entry to all subscribed peers
  const publish = async (entry) => {
    await ipfs.pubsub.publish(address.toString(), entry.bytes)
  }

  // Tear down the protocol handler, pubsub subscription and peer set
  const stop = async () => {
    await queue.onIdle()
    ipfs.libp2p.pubsub.removeEventListener('subscription-change', handlePeerSubscribed)
    await ipfs.libp2p.unhandle('/heads' + address)
    await ipfs.pubsub.unsubscribe(address, handleUpdateMessage)
    peers.clear()
  }

  // Exchange head entries with peers when connected
  await ipfs.libp2p.handle('/heads' + address, handleReceiveHeads)
  ipfs.libp2p.pubsub.addEventListener('subscription-change', handlePeerSubscribed)
  // Subscribe to the pubsub channel for this database through which updates are sent
  await ipfs.pubsub.subscribe(address, handleUpdateMessage)

  return {
    publish,
    stop,
    peers
  }
}
export { Sync as default }

View File

@@ -1,45 +0,0 @@
import Channel from 'ipfs-pubsub-1on1'
import Logger from 'logplease'
const logger = Logger.create('exchange-heads', { color: Logger.Colors.Yellow })
Logger.setLogLevel('ERROR')
// Collect a store's known log heads — both locally written and replicated —
// from its cache. Returns [] when the store or its cache is unavailable.
const getHeadsForDatabase = async store => {
  if (!(store && store._cache)) return []
  const localHeads = await store._cache.get(store.localHeadsPath) || []
  const remoteHeads = await store._cache.get(store.remoteHeadsPath) || []
  return [...localHeads, ...remoteHeads]
}
/**
 * Exchange the latest log heads of `address` with `peer` over a direct
 * one-to-one pubsub channel.
 *
 * Reuses an existing direct connection when one is available; otherwise
 * opens a new channel, registers `onMessage` for the peer's heads, and
 * notifies `onChannelCreated`. Once the channel is connected, sends our own
 * heads for the database. Returns the channel.
 *
 * NOTE(review): `if (heads)` below is always truthy (heads is an array);
 * presumably the intent was `heads.length > 0` — left as-is.
 */
export default async (ipfs, address, peer, getStore, getDirectConnection, onMessage, onChannelCreated) => {
  // Parse incoming messages ({ address, heads }) and hand them to onMessage
  const _handleMessage = message => {
    const msg = JSON.parse(Buffer.from(message.data).toString())
    const { address, heads } = msg
    onMessage(address, heads)
  }

  let channel = getDirectConnection(peer)
  if (!channel) {
    try {
      logger.debug(`Create a channel to ${peer}`)
      channel = await Channel.open(ipfs, peer)
      channel.on('message', _handleMessage)
      logger.debug(`Channel created to ${peer}`)
      onChannelCreated(channel)
    } catch (e) {
      logger.error(e)
    }
  }

  // Wait for the direct channel to be fully connected
  await channel.connect()
  logger.debug(`Connected to ${peer}`)

  // Send the heads if we have any
  const heads = await getHeadsForDatabase(getStore(address))
  logger.debug(`Send latest heads of '${address}':\n`, JSON.stringify(heads.map(e => e.hash), null, 2))
  if (heads) {
    await channel.send(JSON.stringify({ address, heads }))
  }

  return channel
}

View File

@@ -21,6 +21,8 @@ const Identities = async ({ keystore, identityKeysPath, storage, ipfs } = {}) =>
const verifiedIdentitiesCache = await LRUStorage({ size: 1000 })
// await keystore.open()
const getIdentity = async (hash) => {
const bytes = await storage.get(hash)
if (bytes) {

View File

@@ -1,5 +1,5 @@
import IdentityProvider from './interface.js'
import KeyStore, { signMessage, verifyMessage } from '../../key-store.js'
import { signMessage, verifyMessage } from '../../key-store.js'
const type = 'orbitdb'

View File

@@ -1,3 +1,5 @@
export { default as OrbitDB } from './OrbitDB.js'
export { default as OrbitDBAddress, isValidAddress, parseAddress } from './orbit-db-address.js'
export { Log, DefaultAccessController, Entry } from './oplog/index.js'
export { default as KeyStore } from './key-store.js'
export { Identities } from './identities/index.js'
export { Identities, isIdentity } from './identities/index.js'

View File

@@ -125,8 +125,8 @@ const KeyStore = async ({ storage, path } = {}) => {
const decompressedKey = secp256k1.publicKeyConvert(Buffer.from(pubKey), false)
const key = {
publicKey: Buffer.from(decompressedKey),//.toString('hex'),
privateKey: Buffer.from(keys.marshal())//.toString('hex')
publicKey: Buffer.from(decompressedKey), // .toString('hex'),
privateKey: Buffer.from(keys.marshal())// .toString('hex')
}
await addKey(id, key)
@@ -149,7 +149,7 @@ const KeyStore = async ({ storage, path } = {}) => {
if (!storedKey) {
return
}
// return unmarshal(Buffer.from(deserializedKey.privateKey, 'hex'))
return unmarshal(storedKey)
}

View File

@@ -1,41 +0,0 @@
import path from 'path'
import fs from '../fs-shim.js'
import Cache from 'orbit-db-cache'
import Logger from 'logplease'
const logger = Logger.create('orbit-db')
Logger.setLogLevel('ERROR')
export default async function migrate (OrbitDB, options, dbAddress) {
let oldCache = options.cache || (OrbitDB.caches[options.directory] ? OrbitDB.caches[options.directory].cache : null)
let oldStore
if (!oldCache) {
const addr = (path.posix || path).join(OrbitDB.directory, dbAddress.root, dbAddress.path)
if (fs && fs.existsSync && !fs.existsSync(addr)) return
oldStore = await OrbitDB.storage.createStore(addr)
oldCache = new Cache(oldStore)
}
const _localHeads = await oldCache.get('_localHeads')
if (!_localHeads) return
const keyRoot = dbAddress.toString()
logger.debug('Attempting to migrate from old cache location')
const migrationKeys = [
'_remoteHeads',
'_localHeads',
'snapshot',
'queue'
]
for (const i in migrationKeys) {
try {
const key = path.join(keyRoot, migrationKeys[i])
const val = await oldCache.get(migrationKeys[i])
if (val) await options.cache.set(key, val)
} catch (e) {
logger.debug(e.message)
}
}
await options.cache.set(path.join(keyRoot, '_manifest'), dbAddress.root)
if (oldStore) await oldStore.close()
}

View File

@@ -1,9 +0,0 @@
import from021To022 from './0.21-0.22.js'
const migrations = [from021To022]
export const run = async (OrbitDB, options, dbAddress) => {
for (let i = 0; i < migrations.length; i++) {
await migrations[i](OrbitDB, options, dbAddress)
}
}

View File

@@ -133,7 +133,7 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
* @param {data} data Payload to add to the entry
* @return {Promise<Entry>} Entry that was appended
*/
const append = async (data, options = { pointerCount: 1 }) => {
const append = async (data, options = { pointerCount: 0 }) => {
// 1. Prepare entry
// 2. Authorize entry
// 3. Store entry

View File

@@ -1,67 +1,59 @@
import path from 'path'
import * as Path from 'path'
import { CID } from 'multiformats/cid'
import { base58btc } from 'multiformats/bases/base58'
const notEmpty = e => e !== '' && e !== ' '
const isValidAddress = (address) => {
address = address.toString()
export default class OrbitDBAddress {
constructor (root, path) {
this.root = root
this.path = path
if (!address.startsWith('/orbitdb') && !address.startsWith('\\orbitdb')) {
return false
}
toString () {
return OrbitDBAddress.join(this.root, this.path)
address = address.replaceAll('/orbitdb/', '')
address = address.replaceAll('\\orbitdb\\', '')
address = address.replaceAll('/', '')
address = address.replaceAll('\\', '')
let cid
try {
cid = CID.parse(address, base58btc)
} catch (e) {
return false
}
static isValid (address) {
address = address.toString().replace(/\\/g, '/')
return cid !== undefined
}
const containsProtocolPrefix = (e, i) => !((i === 0 || i === 1) && address.toString().indexOf('/orbit') === 0 && e === 'orbitdb')
const parts = address.toString()
.split('/')
.filter(containsProtocolPrefix)
.filter(notEmpty)
let accessControllerHash
const validateHash = (hash) => {
const prefixes = ['zd', 'Qm', 'ba', 'k5']
for (const p of prefixes) {
if (hash.indexOf(p) > -1) {
return true
}
}
return false
}
try {
accessControllerHash = validateHash(parts[0])
? CID.parse(parts[0]).toString()
: null
} catch (e) {
return false
}
return accessControllerHash !== null
const parseAddress = (address) => {
if (!address) {
throw new Error(`Not a valid OrbitDB address: ${address}`)
}
static parse (address) {
if (!address) { throw new Error(`Not a valid OrbitDB address: ${address}`) }
if (!OrbitDBAddress.isValid(address)) { throw new Error(`Not a valid OrbitDB address: ${address}`) }
address = address.toString().replace(/\\/g, '/')
const parts = address.toString()
.split('/')
.filter((e, i) => !((i === 0 || i === 1) && address.toString().indexOf('/orbit') === 0 && e === 'orbitdb'))
.filter(e => e !== '' && e !== ' ')
return new OrbitDBAddress(parts[0], parts.slice(1, parts.length).join('/'))
if (!isValidAddress(address)) {
throw new Error(`Not a valid OrbitDB address: ${address}`)
}
static join (...paths) {
return (path.posix || path).join('/orbitdb', ...paths)
return OrbitDBAddress(address)
}
const OrbitDBAddress = (address) => {
if (address && address.protocol === 'orbitdb' && address.path) {
return address
}
const protocol = 'orbitdb'
const path = address.replace('/orbitdb/', '').replace('\\orbitdb\\', '')
const toString = () => {
return (Path.posix || Path).join('/', protocol, '/', path)
}
return {
protocol,
path,
address,
toString
}
}
export { OrbitDBAddress as default, isValidAddress, parseAddress }

View File

@@ -43,13 +43,13 @@ Object.keys(testAPIs).forEach(API => {
id2 = await IdentityProvider.createIdentity({ id: 'B', keystore: keystore2 })
orbitdb1 = await OrbitDB.createInstance(ipfs1, {
AccessControllers: AccessControllers,
AccessControllers,
directory: dbPath1,
identity: id1
})
orbitdb2 = await OrbitDB.createInstance(ipfs2, {
AccessControllers: AccessControllers,
AccessControllers,
directory: dbPath2,
identity: id2
})

View File

@@ -39,13 +39,13 @@ Object.keys(testAPIs).forEach(API => {
id2 = await IdentityProvider.createIdentity({ id: 'B', keystore: keystore2 })
orbitdb1 = await OrbitDB.createInstance(ipfs1, {
AccessControllers: AccessControllers,
AccessControllers,
directory: dbPath1,
identity: id1
})
orbitdb2 = await OrbitDB.createInstance(ipfs2, {
AccessControllers: AccessControllers,
AccessControllers,
directory: dbPath2,
identity: id2
})

View File

@@ -43,13 +43,13 @@ Object.keys(testAPIs).forEach(API => {
id2 = await IdentityProvider.createIdentity({ id: 'B', keystore: keystore2 })
orbitdb1 = await OrbitDB.createInstance(ipfs1, {
AccessControllers: AccessControllers,
AccessControllers,
directory: dbPath1,
identity: id1
})
orbitdb2 = await OrbitDB.createInstance(ipfs2, {
AccessControllers: AccessControllers,
AccessControllers,
directory: dbPath2,
identity: id2
})

View File

@@ -39,13 +39,13 @@ Object.keys(testAPIs).forEach(API => {
id2 = await IdentityProvider.createIdentity({ id: 'B', keystore: keystore2 })
orbitdb1 = await OrbitDB.createInstance(ipfs1, {
AccessControllers: AccessControllers,
AccessControllers,
directory: dbPath1,
identity: id1
})
orbitdb2 = await OrbitDB.createInstance(ipfs2, {
AccessControllers: AccessControllers,
AccessControllers,
directory: dbPath2,
identity: id2
})

View File

@@ -1,54 +1,54 @@
import pkg from 'elliptic'
const { ec: EC } = pkg
const ec = new EC('secp256k1')
// import pkg from 'elliptic'
// const { ec: EC } = pkg
// const ec = new EC('secp256k1')
/**
* A custom keystore example
*/
class CustomTestKeystore {
constructor (signer) {
this.createKey()
}
// /**
// * A custom keystore example
// */
// class CustomTestKeystore {
// constructor (signer) {
// this.createKey()
// }
createKey () {
const key = ec.genKeyPair()
this.key = ec.keyPair({
pub: key.getPublic('hex'),
priv: key.getPrivate('hex'),
privEnc: 'hex',
pubEnc: 'hex'
})
// createKey () {
// const key = ec.genKeyPair()
// this.key = ec.keyPair({
// pub: key.getPublic('hex'),
// priv: key.getPrivate('hex'),
// privEnc: 'hex',
// pubEnc: 'hex'
// })
return this.key
}
// return this.key
// }
getKey () {
return this.key
}
// getKey () {
// return this.key
// }
// TODO: check if this is really in use
generateKey () {
return Promise.resolve(this.createKey())
}
// // TODO: check if this is really in use
// generateKey () {
// return Promise.resolve(this.createKey())
// }
importPublicKey (key) {
return Promise.resolve(ec.keyFromPublic(key, 'hex'))
}
// importPublicKey (key) {
// return Promise.resolve(ec.keyFromPublic(key, 'hex'))
// }
importPrivateKey (key) {
return Promise.resolve(ec.keyFromPrivate(key, 'hex'))
}
// importPrivateKey (key) {
// return Promise.resolve(ec.keyFromPrivate(key, 'hex'))
// }
sign (key, data) {
const sig = ec.sign(data, key)
return Promise.resolve(sig.toDER('hex'))
}
// sign (key, data) {
// const sig = ec.sign(data, key)
// return Promise.resolve(sig.toDER('hex'))
// }
verify (signature, key, data) {
let res = false
res = ec.verify(data, signature, key)
return Promise.resolve(res)
}
}
// verify (signature, key, data) {
// let res = false
// res = ec.verify(data, signature, key)
// return Promise.resolve(res)
// }
// }
export default new CustomTestKeystore()
// export default new CustomTestKeystore()

View File

@@ -12,12 +12,12 @@ const numTabs = 3
const wait = async (milliseconds) => {
return new Promise((resolve, reject) => {
console.log("waiting...")
console.log('waiting...')
setTimeout(resolve, milliseconds)
})
}
describe(`orbit-db - browser concurrent writes`, function () {
describe('orbit-db - browser concurrent writes', function () {
this.timeout(numTabs * config.timeout)
let browser
@@ -37,7 +37,7 @@ describe(`orbit-db - browser concurrent writes`, function () {
})
describe('Write concurrently', function () {
let tabs = []
const tabs = []
before(async () => {
const createTab = async () => {
const page = await browser.newPage()
@@ -82,7 +82,7 @@ describe(`orbit-db - browser concurrent writes`, function () {
return new Promise((resolve, reject) => {
let polls = 0
const interval = setInterval(async () => {
let logHashes = []
const logHashes = []
await mapSeries(tabs, async (page) => {
await page.evaluate(() => loadConsistentLog())
const hash = await page.evaluate(async () => await getConsistentLogHash())
@@ -96,7 +96,7 @@ describe(`orbit-db - browser concurrent writes`, function () {
clearInterval(interval)
resolve()
} catch (e) {
console.log("Repolling...")
console.log('Repolling...')
if (++polls > 5) {
reject(e)
}
@@ -108,7 +108,7 @@ describe(`orbit-db - browser concurrent writes`, function () {
it('no syncLocal option - Multiple tabs do not converge to same log', async () => {
return new Promise((resolve, reject) => {
const interval = setInterval(async () => {
let logHashes = []
const logHashes = []
await mapSeries(tabs, async (page) => {
const hash = await page.evaluate(async () => await getInconsistentLogHash())
logHashes.push(hash)

View File

@@ -91,7 +91,6 @@
// await db.close()
// })
// it('throws an error if database type doesn\'t match', async () => {
// let err, log, kv
// try {

View File

@@ -55,7 +55,7 @@ Object.keys(testAPIs).forEach((IPFS) => {
describe('Default index \'_id\'', () => {
beforeEach(async () => {
db = await DocumentStore({ OpLog, Database, ipfs, identity: testIdentity1, databaseId, accessController })
db = await DocumentStore({ OpLog, Database, ipfs, identity: testIdentity1, address: databaseId, accessController })
})
afterEach(async () => {
@@ -66,7 +66,7 @@ Object.keys(testAPIs).forEach((IPFS) => {
})
it('creates a document store', async () => {
strictEqual(db.databaseId, databaseId)
strictEqual(db.address.toString(), databaseId)
strictEqual(db.type, 'documentstore')
strictEqual(db.indexBy, '_id')
})
@@ -159,7 +159,7 @@ Object.keys(testAPIs).forEach((IPFS) => {
describe('Custom index \'doc\'', () => {
beforeEach(async () => {
db = await DocumentStore({ OpLog, Database, ipfs, identity: testIdentity1, databaseId, accessController, indexBy: 'doc' })
db = await DocumentStore({ OpLog, Database, ipfs, identity: testIdentity1, address: databaseId, accessController, indexBy: 'doc' })
})
afterEach(async () => {
@@ -170,7 +170,7 @@ Object.keys(testAPIs).forEach((IPFS) => {
})
it('creates a document store', async () => {
strictEqual(db.databaseId, databaseId)
strictEqual(db.address.toString(), databaseId)
strictEqual(db.type, 'documentstore')
strictEqual(db.indexBy, 'doc')
})

View File

@@ -55,7 +55,7 @@ Object.keys(testAPIs).forEach((IPFS) => {
})
beforeEach(async () => {
db = await EventStore({ OpLog, Database, ipfs, identity: testIdentity1, databaseId, accessController })
db = await EventStore({ OpLog, Database, ipfs, identity: testIdentity1, address: databaseId, accessController })
})
afterEach(async () => {
@@ -66,7 +66,7 @@ Object.keys(testAPIs).forEach((IPFS) => {
})
it('creates an event store', async () => {
strictEqual(db.databaseId, databaseId)
strictEqual(db.address.toString(), databaseId)
strictEqual(db.type, 'eventstore')
})

View File

@@ -54,7 +54,7 @@ Object.keys(testAPIs).forEach((IPFS) => {
})
beforeEach(async () => {
db = await KeyValuePersisted({ OpLog, KeyValue, Database, ipfs, identity: testIdentity1, databaseId, accessController })
db = await KeyValuePersisted({ OpLog, KeyValue, Database, ipfs, identity: testIdentity1, address: databaseId, accessController })
})
afterEach(async () => {
@@ -65,7 +65,7 @@ Object.keys(testAPIs).forEach((IPFS) => {
})
it('creates a keyvalue store', async () => {
strictEqual(db.databaseId, databaseId)
strictEqual(db.address.toString(), databaseId)
strictEqual(db.type, 'keyvalue')
})
@@ -178,7 +178,7 @@ Object.keys(testAPIs).forEach((IPFS) => {
const all = []
for await (const pair of db.iterator()) {
all.push(pair)
all.unshift(pair)
}
deepStrictEqual(all, keyvalue)

View File

@@ -1,5 +1,4 @@
import { deepStrictEqual, strictEqual } from 'assert'
import mapSeries from 'p-map-series'
import rimraf from 'rimraf'
import { Log, Entry } from '../../src/oplog/index.js'
import { KeyValue, Database } from '../../src/db/index.js'
@@ -17,7 +16,6 @@ Object.keys(testAPIs).forEach((IPFS) => {
let ipfsd
let ipfs
let keystore, signingKeyStore
let accessController
let identities1
let testIdentity1
@@ -26,15 +24,14 @@ Object.keys(testAPIs).forEach((IPFS) => {
const databaseId = 'keyvalue-AAA'
before(async () => {
// Start two IPFS instances
rmrf('./orbitdb')
ipfsd = await startIpfs(IPFS, config.daemon1)
ipfs = ipfsd.api
const [identities, testIdentities] = await createTestIdentities(ipfs)
identities1 = identities[0]
testIdentity1 = testIdentities[0]
rmrf(testIdentity1.id)
})
after(async () => {
@@ -43,19 +40,11 @@ Object.keys(testAPIs).forEach((IPFS) => {
if (ipfsd) {
await stopIpfs(ipfsd)
}
if (keystore) {
await keystore.close()
}
if (signingKeyStore) {
await signingKeyStore.close()
}
if (testIdentity1) {
rmrf(testIdentity1.id)
}
rmrf('./orbitdb')
})
beforeEach(async () => {
db = await KeyValue({ OpLog, Database, ipfs, identity: testIdentity1, databaseId, accessController })
db = await KeyValue({ OpLog, Database, ipfs, identity: testIdentity1, address: databaseId, accessController })
})
afterEach(async () => {
@@ -66,7 +55,7 @@ Object.keys(testAPIs).forEach((IPFS) => {
})
it('creates a keyvalue store', async () => {
strictEqual(db.databaseId, databaseId)
strictEqual(db.address.toString(), databaseId)
strictEqual(db.type, 'keyvalue')
})
@@ -97,7 +86,7 @@ Object.keys(testAPIs).forEach((IPFS) => {
const key = 'key1'
const expected = 'value1'
const hash = await db.put(key, expected)
await db.put(key, expected)
const actual = await db.get(key)
strictEqual(actual, expected)
})

View File

@@ -2,139 +2,113 @@ import { deepStrictEqual } from 'assert'
import rimraf from 'rimraf'
import { Log, Entry } from '../../../src/oplog/index.js'
import { DocumentStore, Database } from '../../../src/db/index.js'
import { IPFSBlockStorage, LevelStorage } from '../../../src/storage/index.js'
import { getIpfsPeerId, waitForPeers, config, testAPIs, startIpfs, stopIpfs } from 'orbit-db-test-utils'
import { config, startIpfs, stopIpfs } from 'orbit-db-test-utils'
import connectPeers from '../../utils/connect-nodes.js'
import { createTestIdentities, cleanUpTestIdentities } from '../../fixtures/orbit-db-identity-keys.js'
import waitFor from '../../utils/wait-for.js'
const { sync: rmrf } = rimraf
const OpLog = { Log, Entry, IPFSBlockStorage, LevelStorage }
const OpLog = { Log, Entry }
const IPFS = 'js-ipfs'
Object.keys(testAPIs).forEach((IPFS) => {
describe('DocumentStore Replication (' + IPFS + ')', function () {
this.timeout(config.timeout * 2)
describe('Documents Database Replication', function () {
this.timeout(config.timeout * 2)
let ipfsd1, ipfsd2
let ipfs1, ipfs2
let keystore, signingKeyStore
let peerId1, peerId2
let accessController
let identities1, identities2
let testIdentity1, testIdentity2
let db1, db2
let ipfsd1, ipfsd2
let ipfs1, ipfs2
let identities1, identities2
let testIdentity1, testIdentity2
let db1, db2
const databaseId = 'documentstore-AAA'
const databaseId = 'documentstore-AAA'
before(async () => {
// Start two IPFS instances
ipfsd1 = await startIpfs(IPFS, config.daemon1)
ipfsd2 = await startIpfs(IPFS, config.daemon2)
ipfs1 = ipfsd1.api
ipfs2 = ipfsd2.api
const accessController = {
canAppend: async (entry) => {
const identity1 = await identities1.getIdentity(entry.identity)
const identity2 = await identities2.getIdentity(entry.identity)
return identity1.id === testIdentity1.id || identity2.id === testIdentity2.id
}
}
await connectPeers(ipfs1, ipfs2)
before(async () => {
// Start two IPFS instances
ipfsd1 = await startIpfs(IPFS, config.daemon1)
ipfsd2 = await startIpfs(IPFS, config.daemon2)
ipfs1 = ipfsd1.api
ipfs2 = ipfsd2.api
// Get the peer IDs
peerId1 = await getIpfsPeerId(ipfs1)
peerId2 = await getIpfsPeerId(ipfs2)
await connectPeers(ipfs1, ipfs2)
const [identities, testIdentities] = await createTestIdentities(ipfs1, ipfs2)
identities1 = identities[0]
identities2 = identities[1]
testIdentity1 = testIdentities[0]
testIdentity2 = testIdentities[1]
const [identities, testIdentities] = await createTestIdentities(ipfs1, ipfs2)
identities1 = identities[0]
identities2 = identities[1]
testIdentity1 = testIdentities[0]
testIdentity2 = testIdentities[1]
})
accessController = {
canAppend: async (entry) => {
const identity1 = await identities1.getIdentity(entry.identity)
const identity2 = await identities2.getIdentity(entry.identity)
return identity1.id === testIdentity1.id || identity2.id === testIdentity2.id
}
}
after(async () => {
await cleanUpTestIdentities([identities1, identities1])
rmrf(testIdentity1.id)
rmrf(testIdentity2.id)
})
if (ipfsd1) {
await stopIpfs(ipfsd1)
}
if (ipfsd2) {
await stopIpfs(ipfsd2)
}
await rmrf('./orbitdb1')
await rmrf('./orbitdb2')
})
after(async () => {
await cleanUpTestIdentities([identities1, identities1])
beforeEach(async () => {
db1 = await DocumentStore({ OpLog, Database, ipfs: ipfs1, identity: testIdentity1, address: databaseId, accessController, directory: './orbitdb1' })
db2 = await DocumentStore({ OpLog, Database, ipfs: ipfs2, identity: testIdentity2, address: databaseId, accessController, directory: './orbitdb2' })
})
if (ipfsd1) {
await stopIpfs(ipfsd1)
}
if (ipfsd2) {
await stopIpfs(ipfsd2)
}
if (keystore) {
await keystore.close()
}
if (signingKeyStore) {
await signingKeyStore.close()
}
if (testIdentity1) {
rmrf(testIdentity1.id)
}
if (testIdentity2) {
rmrf(testIdentity2.id)
}
})
afterEach(async () => {
if (db1) {
await db1.drop()
await db1.close()
}
if (db2) {
await db2.drop()
await db2.close()
}
})
beforeEach(async () => {
db1 = await DocumentStore({ OpLog, Database, ipfs: ipfs1, identity: testIdentity1, databaseId, accessController })
db2 = await DocumentStore({ OpLog, Database, ipfs: ipfs2, identity: testIdentity2, databaseId, accessController })
})
it('gets all documents', async () => {
let updateDB1Count = 0
let updateDB2Count = 0
afterEach(async () => {
if (db1) {
await db1.drop()
await db1.close()
}
if (db2) {
await db2.drop()
await db2.close()
}
})
const onDB1Update = (entry) => {
++updateDB1Count
}
it('gets all documents', async () => {
let updateDB1Count = 0
let updateDB2Count = 0
const onDB2Update = (entry) => {
++updateDB2Count
}
const onDB1Update = (entry) => {
++updateDB1Count
}
db1.events.on('update', onDB1Update)
db2.events.on('update', onDB2Update)
const onDB2Update = (entry) => {
++updateDB2Count
}
await db1.put({ _id: 1, msg: 'record 1 on db 1' })
await db2.put({ _id: 2, msg: 'record 2 on db 2' })
await db1.put({ _id: 3, msg: 'record 3 on db 1' })
await db2.put({ _id: 4, msg: 'record 4 on db 2' })
db1.events.on('update', onDB1Update)
db2.events.on('update', onDB2Update)
await waitFor(() => updateDB1Count, () => 4)
await waitFor(() => updateDB2Count, () => 4)
await waitForPeers(ipfs1, [peerId2], databaseId)
await waitForPeers(ipfs2, [peerId1], databaseId)
const all1 = []
for await (const item of db1.iterator()) {
all1.unshift(item)
}
const puts = []
puts.push(await db1.put({ _id: 1, msg: 'record 1 on db 1' }))
puts.push(await db2.put({ _id: 2, msg: 'record 2 on db 2' }))
puts.push(await db1.put({ _id: 3, msg: 'record 3 on db 1' }))
puts.push(await db2.put({ _id: 4, msg: 'record 4 on db 2' }))
const all2 = []
for await (const item of db2.iterator()) {
all2.unshift(item)
}
await waitFor(() => updateDB1Count, () => puts.length)
await waitFor(() => updateDB2Count, () => puts.length)
const all1 = []
for await (const item of db1.iterator()) {
all1.unshift(item)
}
const all2 = []
for await (const item of db2.iterator()) {
all2.unshift(item)
}
deepStrictEqual(all1, all2)
})
deepStrictEqual(all1, all2)
})
})

View File

@@ -1,144 +1,188 @@
import { deepStrictEqual } from 'assert'
import rimraf from 'rimraf'
import rmrf from 'rimraf'
import { Log, Entry } from '../../../src/oplog/index.js'
import { EventStore, Database } from '../../../src/db/index.js'
import { IPFSBlockStorage, LevelStorage } from '../../../src/storage/index.js'
import { getIpfsPeerId, waitForPeers, config, testAPIs, startIpfs, stopIpfs } from 'orbit-db-test-utils'
import connectPeers from '../../utils/connect-nodes.js'
import { createTestIdentities, cleanUpTestIdentities } from '../../fixtures/orbit-db-identity-keys.js'
import waitFor from '../../utils/wait-for.js'
const { sync: rmrf } = rimraf
// Test utils
import { config, startIpfs, stopIpfs } from 'orbit-db-test-utils'
import connectPeers from '../../utils/connect-nodes.js'
import waitFor from '../../utils/wait-for.js'
import { createTestIdentities, cleanUpTestIdentities } from '../../fixtures/orbit-db-identity-keys.js'
const OpLog = { Log, Entry, IPFSBlockStorage, LevelStorage }
const IPFS = 'js-ipfs'
Object.keys(testAPIs).forEach((IPFS) => {
describe('EventStore Replication (' + IPFS + ')', function () {
this.timeout(config.timeout * 2)
describe('Events Database Replication', function () {
this.timeout(5000)
let ipfsd1, ipfsd2
let ipfs1, ipfs2
let keystore, signingKeyStore
let peerId1, peerId2
let accessController
let identities1, identities2
let testIdentity1, testIdentity2
let db1, db2
let ipfsd1, ipfsd2
let ipfs1, ipfs2
let identities1, identities2
let testIdentity1, testIdentity2
let db1, db2
const databaseId = 'events-AAA'
const databaseId = 'events-AAA'
before(async () => {
// Start two IPFS instances
ipfsd1 = await startIpfs(IPFS, config.daemon1)
ipfsd2 = await startIpfs(IPFS, config.daemon2)
ipfs1 = ipfsd1.api
ipfs2 = ipfsd2.api
const accessController = {
canAppend: async (entry) => {
const identity = await identities1.getIdentity(entry.identity)
return identity.id === testIdentity1.id
}
}
await connectPeers(ipfs1, ipfs2)
const expected = [
'init',
true,
'hello',
'friend',
12345,
'empty',
'',
'friend33'
]
// Get the peer IDs
peerId1 = await getIpfsPeerId(ipfs1)
peerId2 = await getIpfsPeerId(ipfs2)
before(async () => {
ipfsd1 = await startIpfs(IPFS, config.daemon1)
ipfsd2 = await startIpfs(IPFS, config.daemon2)
ipfs1 = ipfsd1.api
ipfs2 = ipfsd2.api
const [identities, testIdentities] = await createTestIdentities(ipfs1, ipfs2)
identities1 = identities[0]
identities2 = identities[1]
testIdentity1 = testIdentities[0]
testIdentity2 = testIdentities[1]
await connectPeers(ipfs1, ipfs2)
accessController = {
canAppend: async (entry) => {
const identity1 = await identities1.getIdentity(entry.identity)
const identity2 = await identities2.getIdentity(entry.identity)
return identity1.id === testIdentity1.id || identity2.id === testIdentity2.id
}
}
const [identities, testIdentities] = await createTestIdentities(ipfs1, ipfs2)
identities1 = identities[0]
identities2 = identities[1]
testIdentity1 = testIdentities[0]
testIdentity2 = testIdentities[1]
rmrf(testIdentity1.id)
rmrf(testIdentity2.id)
})
await rmrf('./orbitdb1')
await rmrf('./orbitdb2')
})
after(async () => {
await cleanUpTestIdentities([identities1, identities2])
after(async () => {
await cleanUpTestIdentities([identities1, identities2])
if (ipfsd1) {
await stopIpfs(ipfsd1)
}
if (ipfsd2) {
await stopIpfs(ipfsd2)
}
if (keystore) {
await keystore.close()
}
if (signingKeyStore) {
await signingKeyStore.close()
}
if (testIdentity1) {
rmrf(testIdentity1.id)
}
if (testIdentity2) {
rmrf(testIdentity2.id)
}
})
if (ipfsd1) {
await stopIpfs(ipfsd1)
}
if (ipfsd2) {
await stopIpfs(ipfsd2)
}
beforeEach(async () => {
db1 = await EventStore({ OpLog, Database, ipfs: ipfs1, identity: testIdentity1, databaseId, accessController })
db2 = await EventStore({ OpLog, Database, ipfs: ipfs2, identity: testIdentity2, databaseId, accessController })
})
await rmrf('./orbitdb1')
await rmrf('./orbitdb2')
})
afterEach(async () => {
if (db1) {
await db1.drop()
await db1.close()
}
if (db2) {
await db2.drop()
await db2.close()
}
})
afterEach(async () => {
if (db1) {
await db1.drop()
await db1.close()
}
if (db2) {
await db2.drop()
await db2.close()
}
})
it('gets all documents', async () => {
let updateDB1Count = 0
let updateDB2Count = 0
it('replicates a database', async () => {
let connected = false
let updateCount = 0
const onDB1Update = (entry) => {
++updateDB1Count
}
const onConnected = async (peerId) => {
connected = true
}
const onDB2Update = (entry) => {
++updateDB2Count
}
const onUpdate = async (peerId) => {
++updateCount
}
db1.events.on('update', onDB1Update)
db2.events.on('update', onDB2Update)
const onError = (err) => {
console.error(err)
}
await waitForPeers(ipfs1, [peerId2], databaseId)
await waitForPeers(ipfs2, [peerId1], databaseId)
db1 = await EventStore({ OpLog, Database, ipfs: ipfs1, identity: testIdentity1, address: databaseId, accessController, directory: './orbitdb1' })
db2 = await EventStore({ OpLog, Database, ipfs: ipfs2, identity: testIdentity2, address: databaseId, accessController, directory: './orbitdb2' })
const puts = []
puts.push(await db1.add('init'))
puts.push(await db2.add(true))
puts.push(await db1.add('hello'))
puts.push(await db2.add('friend'))
puts.push(await db2.add('12345'))
puts.push(await db2.add('empty'))
puts.push(await db2.add(''))
puts.push(await db2.add('friend33'))
db2.events.on('join', onConnected)
db1.events.on('join', onConnected)
db2.events.on('update', onUpdate)
db2.events.on('error', onError)
db1.events.on('error', onError)
await waitFor(() => updateDB1Count, () => puts.length)
await waitFor(() => updateDB2Count, () => puts.length)
await db1.add(expected[0])
await db1.add(expected[1])
await db1.add(expected[2])
await db1.add(expected[3])
await db1.add(expected[4])
await db1.add(expected[5])
await db1.add(expected[6])
await db1.add(expected[7])
const all1 = []
for await (const record of db1.iterator()) {
all1.unshift(record)
}
await waitFor(() => connected, () => true)
await waitFor(() => updateCount > 0, () => true)
const all2 = []
for await (const record of db2.iterator()) {
all2.unshift(record)
}
const all2 = []
for await (const event of db2.iterator()) {
all2.unshift(event)
}
deepStrictEqual(all2, expected)
deepStrictEqual(all1, all2)
})
const all1 = await db2.all()
deepStrictEqual(all1, expected)
})
it('loads the database after replication', async () => {
db1 = await EventStore({ OpLog, Database, ipfs: ipfs1, identity: testIdentity1, address: databaseId, accessController, directory: './orbitdb1' })
db2 = await EventStore({ OpLog, Database, ipfs: ipfs2, identity: testIdentity2, address: databaseId, accessController, directory: './orbitdb2' })
let connected = false
let updateCount = 0
const onConnected = async (peerId) => {
connected = true
}
const onUpdate = async (peerId) => {
++updateCount
}
const onError = (err) => {
console.error(err)
}
db2.events.on('join', onConnected)
db2.events.on('update', onUpdate)
db2.events.on('error', onError)
db1.events.on('error', onError)
await db1.add(expected[0])
await db1.add(expected[1])
await db1.add(expected[2])
await db1.add(expected[3])
await db1.add(expected[4])
await db1.add(expected[5])
await db1.add(expected[6])
await db1.add(expected[7])
await waitFor(() => connected, () => true)
await waitFor(() => updateCount > 0, () => true)
await db1.drop()
await db1.close()
db1 = null
await db2.close()
db2 = await EventStore({ OpLog, Database, ipfs: ipfs2, identity: testIdentity2, address: databaseId, accessController, directory: './orbitdb2' })
const all2 = []
for await (const event of db2.iterator()) {
all2.unshift(event)
}
deepStrictEqual(all2, expected)
const all1 = await db2.all()
deepStrictEqual(all1, expected)
})
})

View File

@@ -1,34 +1,34 @@
import { deepStrictEqual } from 'assert'
import rimraf from 'rimraf'
import rmrf from 'rimraf'
import { Log, Entry } from '../../../src/oplog/index.js'
import { KeyValue, Database } from '../../../src/db/index.js'
import { IPFSBlockStorage, LevelStorage } from '../../../src/storage/index.js'
import { getIpfsPeerId, waitForPeers, config, testAPIs, startIpfs, stopIpfs } from 'orbit-db-test-utils'
import { KeyValue, KeyValuePersisted, Database } from '../../../src/db/index.js'
import { config, testAPIs, startIpfs, stopIpfs } from 'orbit-db-test-utils'
import connectPeers from '../../utils/connect-nodes.js'
import { createTestIdentities, cleanUpTestIdentities } from '../../fixtures/orbit-db-identity-keys.js'
import waitFor from '../../utils/wait-for.js'
import { createTestIdentities, cleanUpTestIdentities } from '../../fixtures/orbit-db-identity-keys.js'
const { sync: rmrf } = rimraf
const OpLog = { Log, Entry, IPFSBlockStorage, LevelStorage }
const OpLog = { Log, Entry }
Object.keys(testAPIs).forEach((IPFS) => {
describe('KeyValue Replication (' + IPFS + ')', function () {
this.timeout(config.timeout * 2)
describe('KeyValue Database Replication (' + IPFS + ')', function () {
this.timeout(config.timeout)
let ipfsd1, ipfsd2
let ipfs1, ipfs2
let keystore, signingKeyStore
let peerId1, peerId2
let accessController
let identities1, identities2
let testIdentity1, testIdentity2
let db1, db2
let kv1, kv2
const databaseId = 'keyvalue-AAA'
const databaseId = 'kv-AAA'
const accessController = {
canAppend: async (entry) => {
const identity = await identities1.getIdentity(entry.identity)
return identity.id === testIdentity1.id
}
}
before(async () => {
// Start two IPFS instances
ipfsd1 = await startIpfs(IPFS, config.daemon1)
ipfsd2 = await startIpfs(IPFS, config.daemon2)
ipfs1 = ipfsd1.api
@@ -36,26 +36,14 @@ Object.keys(testAPIs).forEach((IPFS) => {
await connectPeers(ipfs1, ipfs2)
// Get the peer IDs
peerId1 = await getIpfsPeerId(ipfs1)
peerId2 = await getIpfsPeerId(ipfs2)
const [identities, testIdentities] = await createTestIdentities(ipfs1, ipfs2)
identities1 = identities[0]
identities2 = identities[1]
testIdentity1 = testIdentities[0]
testIdentity2 = testIdentities[1]
accessController = {
canAppend: async (entry) => {
const identity1 = await identities1.getIdentity(entry.identity)
const identity2 = await identities2.getIdentity(entry.identity)
return identity1.id === testIdentity1.id || identity2.id === testIdentity2.id
}
}
rmrf(testIdentity1.id)
rmrf(testIdentity2.id)
await rmrf('./orbitdb1')
await rmrf('./orbitdb2')
})
after(async () => {
@@ -67,78 +55,162 @@ Object.keys(testAPIs).forEach((IPFS) => {
if (ipfsd2) {
await stopIpfs(ipfsd2)
}
if (keystore) {
await keystore.close()
}
if (signingKeyStore) {
await signingKeyStore.close()
}
if (testIdentity1) {
rmrf(testIdentity1.id)
}
if (testIdentity2) {
rmrf(testIdentity2.id)
}
})
beforeEach(async () => {
db1 = await KeyValue({ OpLog, Database, ipfs: ipfs1, identity: testIdentity1, databaseId, accessController })
db2 = await KeyValue({ OpLog, Database, ipfs: ipfs2, identity: testIdentity2, databaseId, accessController })
await rmrf('./orbitdb1')
await rmrf('./orbitdb2')
})
afterEach(async () => {
if (db1) {
await db1.drop()
await db1.close()
if (kv1) {
await kv1.drop()
await kv1.close()
}
if (db2) {
await db2.drop()
await db2.close()
if (kv2) {
await kv2.drop()
await kv2.close()
}
})
it('gets all key/value pairs', async () => {
let updateDB1Count = 0
let updateDB2Count = 0
it('replicates a database', async () => {
let connected = false
let updateCount = 0
const onDB1Update = (entry) => {
++updateDB1Count
const onConnected = async (peerId) => {
connected = true
}
const onDB2Update = (entry) => {
++updateDB2Count
const onUpdate = (entry) => {
++updateCount
}
db1.events.on('update', onDB1Update)
db2.events.on('update', onDB2Update)
await waitForPeers(ipfs1, [peerId2], databaseId)
await waitForPeers(ipfs2, [peerId1], databaseId)
const ops = []
ops.push(await db1.put('key1', 'init'))
ops.push(await db2.put('key2', true))
ops.push(await db1.put('key3', 'hello'))
ops.push(await db2.put('key4', 'friend'))
ops.push(await db2.put('key5', '12345'))
ops.push(await db2.put('key6', 'empty'))
ops.push(await db2.put('key7', ''))
ops.push(await db2.put('key8', 'friend33'))
await waitFor(() => updateDB1Count, () => ops.length)
await waitFor(() => updateDB2Count, () => ops.length)
const all1 = []
for await (const record of db1.iterator()) {
all1.unshift(record)
const onError = (err) => {
console.error(err)
}
kv1 = await KeyValuePersisted({ KeyValue, OpLog, Database, ipfs: ipfs1, identity: testIdentity1, address: databaseId, accessController, directory: './orbitdb1' })
kv2 = await KeyValuePersisted({ KeyValue, OpLog, Database, ipfs: ipfs2, identity: testIdentity2, address: databaseId, accessController, directory: './orbitdb2' })
kv2.events.on('join', onConnected)
kv1.events.on('join', onConnected)
kv2.events.on('update', onUpdate)
kv2.events.on('error', onError)
kv1.events.on('error', onError)
await kv1.set('init', true)
await kv1.set('hello', 'friend')
await kv1.del('hello')
await kv1.set('hello', 'friend2')
await kv1.del('hello')
await kv1.set('empty', '')
await kv1.del('empty')
await kv1.set('hello', 'friend3')
await waitFor(() => connected, () => true)
await waitFor(() => updateCount > 0, () => true)
const value0 = await kv2.get('init')
deepStrictEqual(value0, true)
const value2 = await kv2.get('hello')
deepStrictEqual(value2, 'friend3')
const value1 = await kv1.get('hello')
deepStrictEqual(value1, 'friend3')
const value9 = await kv1.get('empty')
deepStrictEqual(value9, undefined)
const all2 = []
for await (const record of db2.iterator()) {
all2.unshift(record)
for await (const keyValue of kv2.iterator()) {
all2.push(keyValue)
}
deepStrictEqual(all2, [
{ key: 'hello', value: 'friend3' },
{ key: 'init', value: true }
])
const all1 = []
for await (const keyValue of kv1.iterator()) {
all1.push(keyValue)
}
deepStrictEqual(all1, [
{ key: 'hello', value: 'friend3' },
{ key: 'init', value: true }
])
})
it('loads the database after replication', async () => {
let updateCount = 0
let connected = false
const onConnected = async (peerId) => {
connected = true
}
deepStrictEqual(all1, all2)
const onUpdate = (entry) => {
++updateCount
}
const onError = (err) => {
console.error(err)
}
kv1 = await KeyValuePersisted({ KeyValue, OpLog, Database, ipfs: ipfs1, identity: testIdentity1, address: databaseId, accessController, directory: './orbitdb1' })
kv2 = await KeyValuePersisted({ KeyValue, OpLog, Database, ipfs: ipfs2, identity: testIdentity2, address: databaseId, accessController, directory: './orbitdb2' })
kv2.events.on('join', onConnected)
kv1.events.on('join', onConnected)
kv2.events.on('update', onUpdate)
kv2.events.on('error', onError)
kv1.events.on('error', onError)
await kv1.set('init', true)
await kv1.set('hello', 'friend')
await kv1.del('hello')
await kv1.set('hello', 'friend2')
await kv1.del('hello')
await kv1.set('empty', '')
await kv1.del('empty')
await kv1.set('hello', 'friend3')
await waitFor(() => connected, () => true)
await waitFor(() => updateCount > 0, () => true)
await kv1.close()
await kv2.close()
kv1 = await KeyValuePersisted({ KeyValue, OpLog, Database, ipfs: ipfs1, identity: testIdentity1, address: databaseId, accessController, directory: './orbitdb1' })
kv2 = await KeyValuePersisted({ KeyValue, OpLog, Database, ipfs: ipfs2, identity: testIdentity2, address: databaseId, accessController, directory: './orbitdb2' })
const value0 = await kv2.get('init')
deepStrictEqual(value0, true)
const value2 = await kv2.get('hello')
deepStrictEqual(value2, 'friend3')
const value1 = await kv1.get('hello')
deepStrictEqual(value1, 'friend3')
const value9 = await kv1.get('empty')
deepStrictEqual(value9, undefined)
const all2 = []
for await (const keyValue of kv2.iterator()) {
all2.push(keyValue)
}
deepStrictEqual(all2, [
{ key: 'hello', value: 'friend3' },
{ key: 'init', value: true }
])
const all1 = []
for await (const keyValue of kv1.iterator()) {
all1.push(keyValue)
}
deepStrictEqual(all1, [
{ key: 'hello', value: 'friend3' },
{ key: 'init', value: true }
])
})
})
})

View File

@@ -54,7 +54,7 @@
// address = db.address.toString()
// await db.close()
// db = await orbitdb1.open(address)
// //Load 10 items
// await db.load(10)
@@ -84,7 +84,6 @@
// assert.equal(Object.keys(db.index).length, 10)
// })
// it('reopen store and load 20 items more than exists', async () => {
// await db.close()

View File

@@ -56,7 +56,7 @@ const cleanUpTestIdentities = async (identities) => {
await identity.keystore.close()
}
rmrf('./keys_1')
rmrf('./orbitdb')
// rmrf('./orbitdb')
}
export {

View File

@@ -1,5 +1,4 @@
import assert from 'assert'
import path from 'path'
import rmrf from 'rimraf'
import KeyStore, { signMessage, verifyMessage } from '../../src/key-store.js'
import Identities, { addIdentityProvider } from '../../src/identities/identities.js'

View File

@@ -1,5 +1,4 @@
import assert from 'assert'
import path from 'path'
import rmrf from 'rimraf'
import KeyStore, { signMessage, verifyMessage } from '../../src/key-store.js'
import Identities, { addIdentityProvider } from '../../src/identities/identities.js'
@@ -116,7 +115,7 @@ describe('Ethereum Identity Provider', function () {
publicKey,
signatures: {
id: '<sig>',
publicKey: signatures.publicKey,
publicKey: signatures.publicKey
},
type
})

View File

@@ -1,15 +1,12 @@
import assert from 'assert'
import path from 'path'
import rmrf from 'rimraf'
import LevelStorage from '../../src/storage/level.js'
import KeyStore, { signMessage, verifyMessage } from '../../src/key-store.js'
import Identities, { addIdentityProvider } from '../../src/identities/identities.js'
import Identity from '../../src/identities/identity.js'
import fs from 'fs-extra'
const fixturesPath = path.resolve('./test/identities/fixtures/keys')
import testKeysPath from '../fixtures/test-keys-path.js '
const savedKeysPath = path.resolve('./test/identities/fixtures/savedKeys')
const identityKeysPath = path.resolve('./test/identities/identityKeys')
import testKeysPath from '../fixtures/test-keys-path.js '
const type = 'orbitdb'
describe('Identities', function () {
@@ -94,7 +91,6 @@ describe('Identities', function () {
assert.strictEqual(identity.id, externalId)
})
it('created a key for id in identity-keystore', async () => {
const key = await keystore.getKey(id)
assert.notStrictEqual(key, undefined)
@@ -148,7 +144,6 @@ describe('Identities', function () {
identity = await identities.createIdentity({ id })
})
after(async () => {
if (identities) {
await identities.keystore.close()

View File

@@ -14,7 +14,7 @@ describe('Identity', function () {
const expectedHash = 'zdpuArx43BnXdDff5rjrGLYrxUomxNroc2uaocTgcWK76UfQT'
const expectedBytes = Uint8Array.from([
164,98,105,100,120,39,48,120,48,49,50,51,52,53,54,55,56,57,48,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,100,116,121,112,101,103,111,114,98,105,116,100,98,105,112,117,98,108,105,99,75,101,121,104,60,112,117,98,107,101,121,62,106,115,105,103,110,97,116,117,114,101,115,162,98,105,100,114,115,105,103,110,97,116,117,114,101,32,102,111,114,32,60,105,100,62,105,112,117,98,108,105,99,75,101,121,120,39,115,105,103,110,97,116,117,114,101,32,102,111,114,32,60,112,117,98,108,105,99,75,101,121,32,43,32,105,100,83,105,103,110,97,116,117,114,101,62
164, 98, 105, 100, 120, 39, 48, 120, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 48, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 100, 116, 121, 112, 101, 103, 111, 114, 98, 105, 116, 100, 98, 105, 112, 117, 98, 108, 105, 99, 75, 101, 121, 104, 60, 112, 117, 98, 107, 101, 121, 62, 106, 115, 105, 103, 110, 97, 116, 117, 114, 101, 115, 162, 98, 105, 100, 114, 115, 105, 103, 110, 97, 116, 117, 114, 101, 32, 102, 111, 114, 32, 60, 105, 100, 62, 105, 112, 117, 98, 108, 105, 99, 75, 101, 121, 120, 39, 115, 105, 103, 110, 97, 116, 117, 114, 101, 32, 102, 111, 114, 32, 60, 112, 117, 98, 108, 105, 99, 75, 101, 121, 32, 43, 32, 105, 100, 83, 105, 103, 110, 97, 116, 117, 114, 101, 62
])
let identity
@@ -57,7 +57,7 @@ describe('Identity', function () {
it('throws and error if publicKey was not given in constructor', async () => {
let err
try {
identity = await Identity({ id: 'abc'})
identity = await Identity({ id: 'abc' })
} catch (e) {
err = e.toString()
}

View File

@@ -1,14 +1,6 @@
import { strictEqual, deepStrictEqual } from 'assert'
import LevelStorage from '../src/storage/level.js'
import LRUStorage from '../src/storage/lru.js'
import ComposedStorage from '../src/storage/composed.js'
import KeyStore, { signMessage, verifyMessage } from '../src/key-store.js'
import { testAPIs } from 'orbit-db-test-utils'
import path from 'path'
import fs from 'fs-extra'
import rmrf from 'rimraf'
import { identityKeys, signingKeys } from './fixtures/orbit-db-identity-keys.js'
import { Identities } from '../src/identities/index.js'
import testKeysPath from './fixtures/test-keys-path.js '
Object.keys(testAPIs).forEach((IPFS) => {
@@ -190,12 +182,12 @@ Object.keys(testAPIs).forEach((IPFS) => {
const expected = {
type: 'Buffer',
data: [
4, 231, 36, 122, 76, 21, 91, 99, 209, 130, 162,
60, 112, 203, 111, 232, 186, 46, 68, 188, 158, 157,
98, 220, 69, 212, 196, 22, 124, 205, 233, 89, 68,
241, 61, 179, 199, 7, 218, 46, 224, 227, 253, 107,
165, 49, 202, 239, 159, 134, 235, 121, 19, 32, 35,
120, 108, 214, 19, 158, 197, 235, 237, 79, 174
4, 231, 36, 122, 76, 21, 91, 99, 209, 130, 162,
60, 112, 203, 111, 232, 186, 46, 68, 188, 158, 157,
98, 220, 69, 212, 196, 22, 124, 205, 233, 89, 68,
241, 61, 179, 199, 7, 218, 46, 224, 227, 253, 107,
165, 49, 202, 239, 159, 134, 235, 121, 19, 32, 35,
120, 108, 214, 19, 158, 197, 235, 237, 79, 174
]
}
const publicKey = await keystore.getPublic(key, { format: 'buffer' })
@@ -214,11 +206,11 @@ Object.keys(testAPIs).forEach((IPFS) => {
const expected = {
type: 'Buffer',
data: [
2, 231, 36, 122, 76, 21, 91, 99,
209, 130, 162, 60, 112, 203, 111, 232,
186, 46, 68, 188, 158, 157, 98, 220,
69, 212, 196, 22, 124, 205, 233, 89,
68
2, 231, 36, 122, 76, 21, 91, 99,
209, 130, 162, 60, 112, 203, 111, 232,
186, 46, 68, 188, 158, 157, 98, 220,
69, 212, 196, 22, 124, 205, 233, 89,
68
]
}

View File

@@ -16,7 +16,7 @@
// testAPIs,
// } from 'orbit-db-test-utils'
// const storage = storageAdapter()
// const storage = storageAdapter()
// const dbPath1 = './orbitdb/tests/offline/db1'
// const dbPath2 = './orbitdb/tests/offline/db2'

View File

@@ -10,7 +10,6 @@ import KeyStore from '../../src/key-store.js'
// Test utils
import { config, testAPIs } from 'orbit-db-test-utils'
const { createIdentity } = Identities
const { sync: rmrf } = rimraf
let testIdentity

View File

@@ -1,16 +1,12 @@
import { strictEqual, deepStrictEqual } from 'assert'
import rimraf from 'rimraf'
import { copy } from 'fs-extra'
import { Log } from '../../src/oplog/index.js'
import { Identities } from '../../src/identities/index.js'
import KeyStore from '../../src/key-store.js'
import MemoryStorage from '../../src/storage/memory.js'
import LevelStorage from '../../src/storage/level.js'
import { config, testAPIs } from 'orbit-db-test-utils'
import testKeysPath from '../fixtures/test-keys-path.js '
const { sync: rmrf } = rimraf
const { createIdentity } = Identities
let testIdentity, testIdentity2, testIdentity3
@@ -18,16 +14,14 @@ Object.keys(testAPIs).forEach((IPFS) => {
describe('Log - CRDT (' + IPFS + ')', function () {
this.timeout(config.timeout)
const { identityKeyFixtures, signingKeyFixtures, identityKeysPath } = config
const { identityKeysPath } = config
let keystore
let identities1, identities2, identities3
let identities1
before(async () => {
keystore = await KeyStore({ path: testKeysPath })
const storage = await MemoryStorage()
identities1 = await Identities({ keystore })
testIdentity = await identities1.createIdentity({ id: 'userA' })
testIdentity2 = await identities1.createIdentity({ id: 'userB' })

View File

@@ -4,12 +4,10 @@ import { copy } from 'fs-extra'
import { Entry } from '../../src/oplog/index.js'
import { Identities } from '../../src/identities/index.js'
import KeyStore from '../../src/key-store.js'
import LevelStorage from '../../src/storage/level.js'
import { config, testAPIs, startIpfs, stopIpfs } from 'orbit-db-test-utils'
import testKeysPath from '../fixtures/test-keys-path.js '
const { sync: rmrf } = rimraf
const { createIdentity } = Identities
const { create, isEntry } = Entry
Object.keys(testAPIs).forEach((IPFS) => {
@@ -18,7 +16,7 @@ Object.keys(testAPIs).forEach((IPFS) => {
const { identityKeyFixtures, signingKeyFixtures, identityKeysPath } = config
let keystore, ipfsBlockStore, identityStore
let keystore
let identities
let testIdentity
let ipfsd, ipfs
@@ -26,19 +24,19 @@ Object.keys(testAPIs).forEach((IPFS) => {
before(async () => {
ipfsd = await startIpfs(IPFS, config.daemon1)
ipfs = ipfsd.api
await copy(identityKeyFixtures, identityKeysPath)
await copy(signingKeyFixtures, identityKeysPath)
keystore = await KeyStore({ path: testKeysPath })
identities = await Identities({ keystore, ipfs })
testIdentity = await identities.createIdentity({ id: 'userA' })
})
after(async () => {
await keystore.close()
if (ipfsd) {
await stopIpfs(ipfsd)
}
@@ -73,8 +71,8 @@ Object.keys(testAPIs).forEach((IPFS) => {
strictEqual(entry.refs.length, 0)
// strictEqual(entry.hash, expectedHash)
})
it('retrieves the identity from an entry', async() => {
it('retrieves the identity from an entry', async () => {
const expected = {
id: testIdentity.id,
publicKey: testIdentity.publicKey,
@@ -83,7 +81,7 @@ Object.keys(testAPIs).forEach((IPFS) => {
hash: testIdentity.hash,
bytes: testIdentity.bytes,
sign: undefined,
verify: undefined,
verify: undefined
}
const payload = 'hello world'
const entry = await create(testIdentity, 'A', payload)

View File

@@ -9,7 +9,6 @@ import { config, testAPIs } from 'orbit-db-test-utils'
import testKeysPath from '../fixtures/test-keys-path.js '
const { sync: rmrf } = rimraf
const { createIdentity } = Identities
let testIdentity

View File

@@ -13,7 +13,6 @@ import { config, testAPIs, startIpfs, stopIpfs } from 'orbit-db-test-utils'
import { identityKeys, signingKeys } from '../fixtures/orbit-db-identity-keys.js'
const { sync: rmrf } = rimraf
const { createIdentity } = Identities
const { createLogWithSixteenEntries } = LogCreator
Object.keys(testAPIs).forEach((IPFS) => {
@@ -27,7 +26,7 @@ Object.keys(testAPIs).forEach((IPFS) => {
let testIdentity, testIdentity2, testIdentity3
before(async () => {
keystore = await KeyStore({ storage: await LevelStorage({ path: './keys_1', valueEncoding: 'json' })})
keystore = await KeyStore({ storage: await LevelStorage({ path: './keys_1', valueEncoding: 'json' }) })
for (const [key, value] of Object.entries(identityKeys)) {
await keystore.addKey(key, value)

View File

@@ -1,26 +1,18 @@
import { strictEqual, deepStrictEqual } from 'assert'
import rimraf from 'rimraf'
import { copy } from 'fs-extra'
import { Log } from '../../src/oplog/index.js'
import { Identities } from '../../src/identities/index.js'
import KeyStore from '../../src/key-store.js'
import LevelStorage from '../../src/storage/level.js'
import MemoryStorage from '../../src/storage/memory.js'
import { config, testAPIs } from 'orbit-db-test-utils'
import testKeysPath from '../fixtures/test-keys-path.js '
const { sync: rmrf } = rimraf
let testIdentity, testIdentity2
Object.keys(testAPIs).forEach(IPFS => {
describe('Log - Join Concurrent Entries (' + IPFS + ')', function () {
this.timeout(config.timeout)
const { identityKeyFixtures, signingKeyFixtures, identityKeysPath } = config
let keystore
let identities1, identities2
let identities1
before(async () => {
keystore = await KeyStore({ path: testKeysPath })

View File

@@ -11,7 +11,6 @@ import { config, testAPIs } from 'orbit-db-test-utils'
import { identityKeys, signingKeys } from '../fixtures/orbit-db-identity-keys.js'
const { sync: rmrf } = rimraf
const { createIdentity } = Identities
const last = (arr) => {
return arr[arr.length - 1]

View File

@@ -6,10 +6,8 @@ import bigLogString from '../fixtures/big-log.fixture.js'
import { Identities } from '../../src/identities/index.js'
import KeyStore from '../../src/key-store.js'
import LogCreator from './utils/log-creator.js'
import MemoryStorage from '../../src/storage/memory.js'
// Test utils
import { config, MemStore, testAPIs, startIpfs, stopIpfs } from 'orbit-db-test-utils'
import testKeysPath from '../fixtures/test-keys-path.js '
import { config, testAPIs, startIpfs, stopIpfs } from 'orbit-db-test-utils'
const { sync: rmrf } = rimraf
const { LastWriteWins } = Sorting
@@ -49,7 +47,7 @@ Object.keys(testAPIs).forEach((IPFS) => {
await copy(identityKeyFixtures, identityKeysPath)
await copy(signingKeyFixtures, identityKeysPath)
keystore = await KeyStore({ storage: await LevelStorage({ path: identityKeysPath, valueEncoding: 'json' }) })
keystore = await KeyStore({ path: testKeysPath })
testIdentity = await createIdentity({ id: 'userC', keystore })
testIdentity2 = await createIdentity({ id: 'userB', keystore })
@@ -57,10 +55,6 @@ Object.keys(testAPIs).forEach((IPFS) => {
testIdentity4 = await createIdentity({ id: 'userA', keystore })
ipfsd = await startIpfs(IPFS, config.defaultIpfsConfig)
ipfs = ipfsd.api
const memstore = new MemStore()
ipfs.object.put = memstore.put.bind(memstore)
ipfs.object.get = memstore.get.bind(memstore)
})
after(async () => {

View File

@@ -4,14 +4,12 @@ import { Log, Entry } from '../../src/oplog/index.js'
import { Identities } from '../../src/identities/index.js'
import KeyStore from '../../src/key-store.js'
import { copy } from 'fs-extra'
import LevelStorage from '../../src/storage/level.js'
import MemoryStorage from '../../src/storage/memory.js'
import { config, testAPIs } from 'orbit-db-test-utils'
import testKeysPath from '../fixtures/test-keys-path.js '
const { sync: rmrf } = rimraf
const { create } = Entry
const { createIdentity } = Identities
let testIdentity

View File

@@ -4,13 +4,11 @@ import { copy } from 'fs-extra'
import { Log } from '../../src/oplog/index.js'
import { Identities } from '../../src/identities/index.js'
import KeyStore from '../../src/key-store.js'
import LevelStorage from '../../src/storage/level.js'
import MemoryStorage from '../../src/storage/memory.js'
import testKeysPath from '../fixtures/test-keys-path.js '
import { config, testAPIs } from 'orbit-db-test-utils'
const { sync: rmrf } = rimraf
const { createIdentity } = Identities
let testIdentity
@@ -20,7 +18,7 @@ Object.keys(testAPIs).forEach((IPFS) => {
const { identityKeyFixtures, signingKeyFixtures, identityKeysPath } = config
let keystore, signingKeyStore
let keystore
let identities
before(async () => {

View File

@@ -1,17 +1,11 @@
import { strictEqual } from 'assert'
import rimraf from 'rimraf'
import { copy } from 'fs-extra'
import { Log, Entry } from '../../src/index.js'
import { MemoryStorage, IPFSBlockStorage } from '../../src/storage/index.js'
import { Identities } from '../../src/identities/index.js'
import { IPFSBlockStorage } from '../../src/storage/index.js'
// Test utils
import { config, testAPIs, startIpfs, stopIpfs, getIpfsPeerId, waitForPeers, connectPeers } from 'orbit-db-test-utils'
import { createTestIdentities, cleanUpTestIdentities } from '../fixtures/orbit-db-identity-keys.js'
const { sync: rmrf } = rimraf
const { createIdentity } = Identities
Object.keys(testAPIs).forEach((IPFS) => {
describe('ipfs-log - Replication (' + IPFS + ')', function () {
this.timeout(config.timeout * 2)

View File

@@ -1,15 +1,8 @@
import { notStrictEqual, strictEqual, deepStrictEqual } from 'assert'
import rimraf from 'rimraf'
import { notStrictEqual, strictEqual } from 'assert'
import { Log } from '../../src/oplog/index.js'
import { Identities } from '../../src/identities/index.js'
// Test utils
import { config, testAPIs } from 'orbit-db-test-utils'
import { createTestIdentities, cleanUpTestIdentities } from '../fixtures/orbit-db-identity-keys.js'
const { sync: rmrf } = rimraf
const { createIdentity } = Identities
Object.keys(testAPIs).forEach((IPFS) => {
describe('Signed Log (' + IPFS + ')', function () {
this.timeout(config.timeout)
@@ -150,12 +143,12 @@ Object.keys(testAPIs).forEach((IPFS) => {
})
it('throws an error upon join if entry doesn\'t have append access', async () => {
const testACL = {
canAppend: async (entry) => {
const identity = await identities1.getIdentity(entry.identity)
return identity && identity.id !== testIdentity2.id
}
const testACL = {
canAppend: async (entry) => {
const identity = await identities1.getIdentity(entry.identity)
return identity && identity.id !== testIdentity2.id
}
}
const log1 = await Log(testIdentity, { logId: 'A', access: testACL })
const log2 = await Log(testIdentity2, { logId: 'A' })

View File

@@ -1,118 +1,149 @@
// import assert from 'assert'
// import rmrf from 'rimraf'
// import OrbitDB from '../src/OrbitDB.js'
// import OrbitDBAddress from '../src/orbit-db-address.js'
// import {
// config,
// startIpfs,
// stopIpfs,
// testAPIs
// } from 'orbit-db-test-utils'
import { strictEqual, notStrictEqual } from 'assert'
import { OrbitDBAddress, isValidAddress, parseAddress } from '../src/index.js'
// const dbPath = './orbitdb/tests/orbit-db-address'
describe('OrbitDB Address', function () {
describe('Creating an address from full address string', () => {
it('creates an address from full address string', () => {
const address = '/orbitdb/zdpuAuK3BHpS7NvMBivynypqciYCuy2UW77XYBPUYRnLjnw13'
const addr = OrbitDBAddress(address)
notStrictEqual(addr, undefined)
})
// Object.keys(testAPIs).forEach(API => {
// describe(`orbit-db - OrbitDB Address (${API})`, function() {
// this.timeout(config.timeout)
it('has a protocol prefix', () => {
const address = '/orbitdb/zdpuAuK3BHpS7NvMBivynypqciYCuy2UW77XYBPUYRnLjnw13'
const addr = OrbitDBAddress(address)
strictEqual(addr.protocol, 'orbitdb')
})
// let ipfsd, ipfs, orbitdb
it('has a path', () => {
const address = '/orbitdb/zdpuAuK3BHpS7NvMBivynypqciYCuy2UW77XYBPUYRnLjnw13'
const addr = OrbitDBAddress(address)
strictEqual(addr.path, 'zdpuAuK3BHpS7NvMBivynypqciYCuy2UW77XYBPUYRnLjnw13')
})
})
// before(async () => {
// rmrf.sync(dbPath)
// ipfsd = await startIpfs(API, config.daemon1)
// ipfs = ipfsd.api
// orbitdb = await OrbitDB.createInstance(ipfs, { directory: dbPath })
// })
describe('Creating an address from hash string', () => {
it('creates an address', () => {
const address = 'zdpuAuK3BHpS7NvMBivynypqciYCuy2UW77XYBPUYRnLjnw13'
const addr = OrbitDBAddress(address)
notStrictEqual(addr, undefined)
})
// after(async () => {
// if(orbitdb)
// await orbitdb.stop()
it('has a protocol prefix', () => {
const address = 'zdpuAuK3BHpS7NvMBivynypqciYCuy2UW77XYBPUYRnLjnw13'
const addr = OrbitDBAddress(address)
strictEqual(addr.protocol, 'orbitdb')
})
// if (ipfsd)
// await stopIpfs(ipfsd)
// })
it('has a path', () => {
const address = 'zdpuAuK3BHpS7NvMBivynypqciYCuy2UW77XYBPUYRnLjnw13'
const addr = OrbitDBAddress(address)
strictEqual(addr.path, 'zdpuAuK3BHpS7NvMBivynypqciYCuy2UW77XYBPUYRnLjnw13')
})
})
// describe('Parse Address', () => {
// it('throws an error if address is empty', () => {
// let err
// try {
// const result = OrbitDB.parseAddress('')
// } catch (e) {
// err = e.toString()
// }
// assert.equal(err, 'Error: Not a valid OrbitDB address: ')
// })
describe('Converting address to a string', () => {
it('outputs a valid address string', () => {
const address = '/orbitdb/zdpuAuK3BHpS7NvMBivynypqciYCuy2UW77XYBPUYRnLjnw13'
const addr = OrbitDBAddress(address)
const result = addr.toString()
strictEqual(result, address)
})
})
// it('parse address successfully', () => {
// const address = '/orbitdb/zdpuAuK3BHpS7NvMBivynypqciYCuy2UW77XYBPUYRnLjnw13/first-database'
// const result = OrbitDB.parseAddress(address)
describe('isValid Address', () => {
it('is not valid if address is an empty string', () => {
const result = isValidAddress('')
strictEqual(result, false)
})
// const isInstanceOf = result instanceof OrbitDBAddress
// assert.equal(isInstanceOf, true)
it('is a valid address', () => {
const address = '/orbitdb/zdpuAuK3BHpS7NvMBivynypqciYCuy2UW77XYBPUYRnLjnw13'
const result = isValidAddress(address)
strictEqual(result, true)
})
// assert.equal(result.root, 'zdpuAuK3BHpS7NvMBivynypqciYCuy2UW77XYBPUYRnLjnw13')
// assert.equal(result.path, 'first-database')
it('is a valid address if it\'s another instance of OrbitDBAddress', () => {
const address = '/orbitdb/zdpuAuK3BHpS7NvMBivynypqciYCuy2UW77XYBPUYRnLjnw13'
const addr = OrbitDBAddress(address)
const result = isValidAddress(addr)
strictEqual(result, true)
})
// assert.equal(result.toString().indexOf('/orbitdb'), 0)
// assert.equal(result.toString().indexOf('zd'), 9)
// })
it('is not valid address if it\'s missing the /orbitdb prefix', () => {
const address = 'zdpuAuK3BHpS7NvMBivynypqciYCuy2UW77XYBPUYRnLjnw13'
const result = isValidAddress(address)
// it('parse address with backslashes (win32) successfully', () => {
// const address = '\\orbitdb\\Qmdgwt7w4uBsw8LXduzCd18zfGXeTmBsiR8edQ1hSfzcJC\\first-database'
// const result = OrbitDB.parseAddress(address)
strictEqual(result, false)
})
// const isInstanceOf = result instanceof OrbitDBAddress
// assert.equal(isInstanceOf, true)
it('is not a valid address if the multihash is invalid - v0', () => {
const address = '/orbitdb/Qmdgwt7w4uBsw8LXduzCd18zfGXeTmBsiR8edQ1hSfzc'
const result = isValidAddress(address)
// assert.equal(result.root, 'Qmdgwt7w4uBsw8LXduzCd18zfGXeTmBsiR8edQ1hSfzcJC')
// assert.equal(result.path, 'first-database')
strictEqual(result, false)
})
// assert.equal(result.toString().indexOf('/orbitdb'), 0)
// assert.equal(result.toString().indexOf('Qm'), 9)
// })
// })
it('is not a valid address if the multihash is invalid - v2', () => {
const address = '/orbitdb/zdpuAuK3BHpS7NvMBivynypqciYCuy2UW77XYBPUYRnLjnw133333'
const result = isValidAddress(address)
// describe('isValid Address', () => {
// it('returns false for empty string', () => {
// const result = OrbitDB.isValidAddress('')
// assert.equal(result, false)
// })
strictEqual(result, false)
})
// it('validate address successfully', () => {
// const address = '/orbitdb/zdpuAuK3BHpS7NvMBivynypqciYCuy2UW77XYBPUYRnLjnw13/first-database'
// const result = OrbitDB.isValidAddress(address)
it('is a valid address in win32 format', () => {
const address = '\\orbitdb\\Qmdgwt7w4uBsw8LXduzCd18zfGXeTmBsiR8edQ1hSfzcJC'
const result = isValidAddress(address)
// assert.equal(result, true)
// })
strictEqual(result, true)
})
})
// it('handle missing orbitdb prefix', () => {
// const address = 'zdpuAuK3BHpS7NvMBivynypqciYCuy2UW77XYBPUYRnLjnw13/first-database'
// const result = OrbitDB.isValidAddress(address)
describe('Parsing an address', () => {
it('parses a valid address', () => {
const address = '/orbitdb/zdpuAuK3BHpS7NvMBivynypqciYCuy2UW77XYBPUYRnLjnw13'
const result = parseAddress(address)
// assert.equal(result, true)
// })
strictEqual(result.protocol, 'orbitdb')
strictEqual(result.path, 'zdpuAuK3BHpS7NvMBivynypqciYCuy2UW77XYBPUYRnLjnw13')
// it('handle missing db address name', () => {
// const address = '/orbitdb/zdpuAuK3BHpS7NvMBivynypqciYCuy2UW77XYBPUYRnLjnw13'
// const result = OrbitDB.isValidAddress(address)
strictEqual(result.toString().indexOf('/orbitdb'), 0)
strictEqual(result.toString().indexOf('zdpuAuK3BHpS7NvMBivynypqciYCuy2UW77XYBPUYRnLjnw13'), 9)
})
// assert.equal(result, true)
// })
it('parses a valid address in win32 format', () => {
const address = '\\orbitdb\\Qmdgwt7w4uBsw8LXduzCd18zfGXeTmBsiR8edQ1hSfzcJC'
const result = parseAddress(address)
// it('handle invalid multihash', () => {
// const address = '/orbitdb/Qmdgwt7w4uBsw8LXduzCd18zfGXeTmBsiR8edQ1hSfzc/first-database'
// const result = OrbitDB.isValidAddress(address)
strictEqual(result.protocol, 'orbitdb')
strictEqual(result.path, 'Qmdgwt7w4uBsw8LXduzCd18zfGXeTmBsiR8edQ1hSfzcJC')
// assert.equal(result, false)
// })
strictEqual(result.toString().indexOf('/orbitdb'), 0)
strictEqual(result.toString().indexOf('Qmdgwt7w4uBsw8LXduzCd18zfGXeTmBsiR8edQ1hSfzcJC'), 9)
})
// it('validate address with backslashes (win32) successfully', () => {
// const address = '\\orbitdb\\Qmdgwt7w4uBsw8LXduzCd18zfGXeTmBsiR8edQ1hSfzcJC\\first-database'
// const result = OrbitDB.isValidAddress(address)
it('throws an error if address is empty', () => {
let err
try {
parseAddress('')
} catch (e) {
err = e.toString()
}
strictEqual(err, 'Error: Not a valid OrbitDB address: ')
})
// assert.equal(result, true)
// })
// })
it('throws an error if address contains too many parts', () => {
const address = '/orbitdb/Qmdgwt7w4uBsw8LXduzCd18zfGXeTmBsiR8edQ1hSfzc/this-should-not-be-here'
// })
// })
let err
try {
parseAddress(address)
} catch (e) {
err = e
}
notStrictEqual(err, undefined)
strictEqual(err.message, `Not a valid OrbitDB address: ${address}`)
})
})
})

470
test/orbitdb-open.test.js Normal file
View File

@@ -0,0 +1,470 @@
import { deepStrictEqual, strictEqual, notStrictEqual } from 'assert'
import rmrf from 'rimraf'
import fs from 'fs'
import path from 'path'
import { OrbitDB, isValidAddress } from '../src/index.js'
import { KeyValuePersisted } from '../src/db/index.js'
// Test utils
import { config, testAPIs, startIpfs, stopIpfs } from 'orbit-db-test-utils'
import connectPeers from './utils/connect-nodes.js'
import waitFor from './utils/wait-for.js'
Object.keys(testAPIs).forEach((IPFS) => {
describe('Open databases (' + IPFS + ')', function () {
this.timeout(config.timeout)
let ipfsd1, ipfsd2
let ipfs1, ipfs2
let orbitdb1, orbitdb2
before(async () => {
ipfsd1 = await startIpfs(IPFS, config.daemon1)
ipfsd2 = await startIpfs(IPFS, config.daemon2)
ipfs1 = ipfsd1.api
ipfs2 = ipfsd2.api
await connectPeers(ipfs1, ipfs2)
await rmrf('./orbitdb')
await rmrf('./orbitdb1')
await rmrf('./orbitdb2')
})
after(async () => {
if (ipfsd1) {
await stopIpfs(ipfsd1)
}
if (ipfsd2) {
await stopIpfs(ipfsd2)
}
await rmrf('./orbitdb')
await rmrf('./orbitdb1')
await rmrf('./orbitdb2')
})
describe('creating a database instance', () => {
let db
before(async () => {
orbitdb1 = await OrbitDB({ ipfs: ipfs1, id: 'user1', directory: './orbitdb1' })
db = await orbitdb1.open('helloworld')
})
after(async () => {
if (db) {
await db.drop()
await db.close()
}
if (orbitdb1) {
await orbitdb1.stop()
}
await rmrf('./orbitdb1')
})
// Shape checks for a freshly created database instance: every property
// and method the Database API promises should be present.
const assertIsFunction = (value) => {
  notStrictEqual(value, undefined)
  strictEqual(typeof value, 'function')
}
it('creates a database instance', async () => {
  notStrictEqual(db, undefined)
})
it('has an address', async () => {
  notStrictEqual(db.address, undefined)
})
it('has a valid OrbitDB address', async () => {
  strictEqual(isValidAddress(db.address), true)
})
it('has a name', async () => {
  strictEqual(db.name, 'helloworld')
})
it('has an identity', async () => {
  notStrictEqual(db.identity, undefined)
})
it('has a close function', async () => {
  assertIsFunction(db.close)
})
it('has a drop function', async () => {
  assertIsFunction(db.drop)
})
it('has a addOperation function', async () => {
  assertIsFunction(db.addOperation)
})
it('has a log', async () => {
  notStrictEqual(db.log, undefined)
})
it('has a log where the logId matches the databaseId', async () => {
  strictEqual(db.log.id, db.address.toString())
})
it('has a events emitter', async () => {
  notStrictEqual(db.events, undefined)
})
it('has a type', async () => {
  notStrictEqual(db.type, undefined)
})
it('has a type that equals the database type', async () => {
  strictEqual(db.type, 'eventstore')
})
it('has a put function', async () => {
  assertIsFunction(db.put)
})
it('has a add function', async () => {
  assertIsFunction(db.add)
})
it('has a get function', async () => {
  assertIsFunction(db.get)
})
it('has an iterator function', async () => {
  assertIsFunction(db.iterator)
})
it('has an all function', async () => {
  assertIsFunction(db.all)
})
it('creates a directory for the database oplog', async () => {
  // Oplog heads are persisted under <directory>/<address path>/log/_heads.
  const expectedPath = path.join(orbitdb1.directory, `./${db.address.path}`, '/log/_heads')
  strictEqual(fs.existsSync(expectedPath), true)
})
})
// Re-opening a database by name should load every previously written
// entry from the local store.
describe('opening a database', () => {
  let db
  const amount = 10
  // The values written during setup, oldest first.
  const expected = Array.from({ length: amount }, (_, i) => 'hello' + i)
  before(async () => {
    orbitdb1 = await OrbitDB({ ipfs: ipfs1, id: 'user1' })
    db = await orbitdb1.open('helloworld')
    for (const value of expected) {
      await db.add(value)
    }
    await db.close()
  })
  after(async () => {
    if (db) {
      await db.close()
    }
    await orbitdb1?.stop()
    await rmrf('./orbitdb')
  })
  it('returns all entries in the database', async () => {
    db = await orbitdb1.open('helloworld')
    strictEqual(db.type, 'eventstore')
    strictEqual(db.name, 'helloworld')
    // unshift reverses the iteration order so `all` ends up oldest first.
    const all = []
    for await (const event of db.iterator()) {
      all.unshift(event)
    }
    deepStrictEqual(all, expected)
  })
})
// A second identity on a different IPFS node opens the database by its
// address and reads the entries written by the first user.
describe('opening a database as a different user', () => {
  let db, address
  const amount = 10
  // The values written during setup, oldest first.
  const expected = Array.from({ length: amount }, (_, i) => 'hello' + i)
  before(async () => {
    orbitdb1 = await OrbitDB({ ipfs: ipfs1, id: 'user1' })
    db = await orbitdb1.open('helloworld')
    for (const value of expected) {
      await db.add(value)
    }
    address = db.address
    await db.close()
    await orbitdb1.stop()
    orbitdb2 = await OrbitDB({ ipfs: ipfs2, id: 'user2' })
  })
  after(async () => {
    if (db) {
      await db.close()
    }
    await orbitdb2?.stop()
    await rmrf('./orbitdb')
  })
  it('returns all entries in the database', async () => {
    db = await orbitdb2.open(address)
    strictEqual(db.type, 'eventstore')
    strictEqual(db.name, 'helloworld')
    // unshift reverses the iteration order so `all` ends up oldest first.
    const all = []
    for await (const event of db.iterator()) {
      all.unshift(event)
    }
    deepStrictEqual(all, expected)
  })
})
// Replication between two peers: orbitdb1 writes entries, orbitdb2 opens
// the same address and receives them over the sync protocol.
describe('opening a replicated database', () => {
  let db1, db2
  let address
  const amount = 10
  before(async () => {
    // Separate data directories so the two instances don't share on-disk state.
    orbitdb1 = await OrbitDB({ ipfs: ipfs1, id: 'user1', directory: './orbitdb1' })
    orbitdb2 = await OrbitDB({ ipfs: ipfs2, id: 'user2', directory: './orbitdb2' })
    db1 = await orbitdb1.open('helloworld2')
    for (let i = 0; i < amount; i++) {
      await db1.add('hello' + i)
    }
    address = db1.address
  })
  after(async () => {
    if (db1) {
      await db1.close()
    }
    if (db2) {
      await db2.close()
    }
    if (orbitdb1) {
      await orbitdb1.stop()
    }
    if (orbitdb2) {
      await orbitdb2.stop()
    }
    await rmrf('./orbitdb1')
    await rmrf('./orbitdb2')
  })
  it('replicates the database', async () => {
    console.time('replicate')
    let updateCount = 0
    let connected = false
    const onError = (err) => {
      console.error(err)
    }
    const onConnected = async (peerId) => {
      connected = true
    }
    const onUpdate = (entry) => {
      ++updateCount
    }
    db2 = await orbitdb2.open(address)
    db2.events.on('error', onError)
    db2.events.on('update', onUpdate)
    db2.events.on('join', onConnected)
    // Wait until a peer has joined and at least one update has arrived.
    // NOTE(review): only the first 'update' is awaited; assumes all heads
    // arrive in that exchange — confirm against the sync protocol.
    await waitFor(() => connected, () => true)
    await waitFor(() => updateCount > 0, () => true)
    const expected = []
    for (let i = 0; i < amount; i++) {
      expected.push('hello' + i)
    }
    // unshift reverses the iteration order so `all` ends up oldest first.
    const all = []
    for await (const event of db2.iterator()) {
      all.unshift(event)
    }
    console.timeEnd('replicate')
    deepStrictEqual(all, expected)
  })
  it('opens the replicated database', async () => {
    // Remove the writer entirely so db2 must serve the data from its own
    // local replica rather than syncing it again.
    if (db1) {
      await db1.drop()
      await db1.close()
      db1 = null
    }
    if (db2) {
      await db2.close()
    }
    if (orbitdb1) {
      await orbitdb1.stop()
      orbitdb1 = null
    }
    db2 = await orbitdb2.open(address)
    const expected = []
    for (let i = 0; i < amount; i++) {
      expected.push('hello' + i)
    }
    const all = []
    for await (const event of db2.iterator()) {
      all.unshift(event)
    }
    deepStrictEqual(all, expected)
  })
})
// Key-value database: write, close, and re-open — both with the default
// store and with a custom Store implementation (KeyValuePersisted).
describe('opening a keyvalue database', () => {
  let db, address
  const amount = 10
  // The key/value pairs written during setup, oldest first.
  const expected = Array.from({ length: amount }, (_, i) => ({ key: 'hello' + i, value: 'hello' + i }))
  before(async () => {
    orbitdb1 = await OrbitDB({ ipfs: ipfs1, id: 'user1' })
    db = await orbitdb1.open('helloworld', { type: 'keyvalue' })
    address = db.address
    for (const { key, value } of expected) {
      await db.put(key, value)
    }
    await db.close()
  })
  after(async () => {
    if (db) {
      await db.close()
    }
    await orbitdb1?.stop()
    await rmrf('./orbitdb')
  })
  // Collects all pairs from the iterator; unshift reverses the iteration
  // order so the result is oldest first.
  const readAll = async () => {
    const all = []
    for await (const { key, value } of db.iterator()) {
      all.unshift({ key, value })
    }
    return all
  }
  it('returns all entries in the database', async () => {
    db = await orbitdb1.open(address)
    strictEqual(db.type, 'keyvalue')
    strictEqual(db.name, 'helloworld')
    deepStrictEqual(await readAll(), expected)
  })
  it('opens the database with a custom Store - KeyValuePersisted', async () => {
    if (db) {
      await db.close()
    }
    db = await orbitdb1.open(address, { Store: KeyValuePersisted })
    strictEqual(db.type, 'keyvalue')
    strictEqual(db.name, 'helloworld')
    deepStrictEqual(await readAll(), expected)
  })
})
// Documents database: write docs keyed by _id, close, re-open, read back.
describe('opening an documents database', () => {
  let db, address
  const amount = 10
  // The documents written during setup, oldest first.
  const expected = Array.from({ length: amount }, (_, i) => ({ _id: 'hello' + i, msg: 'hello' + i }))
  before(async () => {
    orbitdb1 = await OrbitDB({ ipfs: ipfs1, id: 'user1' })
    db = await orbitdb1.open('helloworld', { type: 'documents' })
    address = db.address
    for (const doc of expected) {
      // Copy so the store never shares references with `expected`.
      await db.put({ ...doc })
    }
    await db.close()
  })
  after(async () => {
    if (db) {
      await db.close()
    }
    await orbitdb1?.stop()
    await rmrf('./orbitdb')
  })
  it('returns all entries in the database', async () => {
    db = await orbitdb1.open(address)
    strictEqual(db.type, 'documentstore')
    strictEqual(db.name, 'helloworld')
    // unshift reverses the iteration order so `all` ends up oldest first.
    const all = []
    for await (const doc of db.iterator()) {
      all.unshift(doc)
    }
    deepStrictEqual(all, expected)
  })
})
})
})

View File

@@ -0,0 +1,126 @@
import { strictEqual, deepStrictEqual } from 'assert'
import rmrf from 'rimraf'
import { OrbitDB } from '../src/index.js'
import { config, testAPIs, startIpfs, stopIpfs } from 'orbit-db-test-utils'
import connectPeers from './utils/connect-nodes.js'
import waitFor from './utils/wait-for.js'
// Run the replication suite against every available IPFS implementation.
Object.keys(testAPIs).forEach((IPFS) => {
  describe('Replicating databases (' + IPFS + ')', function () {
    this.timeout(config.timeout * 2)
    let ipfsd1, ipfsd2
    let ipfs1, ipfs2
    let orbitdb1, orbitdb2
    before(async () => {
      // Two IPFS daemons connected directly to each other.
      ipfsd1 = await startIpfs(IPFS, config.daemon1)
      ipfsd2 = await startIpfs(IPFS, config.daemon2)
      ipfs1 = ipfsd1.api
      ipfs2 = ipfsd2.api
      await connectPeers(ipfs1, ipfs2)
    })
    after(async () => {
      if (ipfsd1) {
        await stopIpfs(ipfsd1)
      }
      if (ipfsd2) {
        await stopIpfs(ipfsd2)
      }
      await rmrf('./orbitdb1')
      await rmrf('./orbitdb2')
    })
    describe('replicating a database', () => {
      let db1, db2
      const amount = 128 + 1 // Same amount as in oplog replication test
      before(async () => {
        // Separate data directories so the instances don't share on-disk state.
        orbitdb1 = await OrbitDB({ ipfs: ipfs1, id: 'user1', directory: './orbitdb1' })
        orbitdb2 = await OrbitDB({ ipfs: ipfs2, id: 'user2', directory: './orbitdb2' })
        db1 = await orbitdb1.open('helloworld')
        for (let i = 0; i < amount; i++) {
          await db1.add('hello' + i)
        }
      })
      after(async () => {
        if (db1) {
          await db1.close()
        }
        if (db2) {
          await db2.close()
        }
        if (orbitdb1) {
          await orbitdb1.stop()
        }
        if (orbitdb2) {
          await orbitdb2.stop()
        }
        await rmrf('./orbitdb1')
        await rmrf('./orbitdb2')
      })
      it('returns all entries in the replicated database', async () => {
        console.time('replicate2')
        let replicated = false
        // Replication is complete when the logical clock of the newest
        // known head reaches the number of entries written by db1.
        const onConnected = async (peerId) => {
          const head = (await db2.log.heads())[0]
          if (head && head.clock.time === amount) {
            replicated = true
          }
        }
        const onUpdated = (entry) => {
          if (entry.clock.time === amount) {
            replicated = true
          }
        }
        const onError = (err) => {
          console.error(err)
        }
        db1.events.on('error', onError)
        db2 = await orbitdb2.open(db1.address)
        db2.events.on('join', onConnected)
        db2.events.on('update', onUpdated)
        db2.events.on('error', onError)
        await waitFor(() => replicated, () => true)
        // Both handles should describe the same database.
        strictEqual(db1.address, db2.address)
        strictEqual(db1.name, db2.name)
        strictEqual(db1.type, db2.type)
        // unshift reverses the iteration order so `all2` ends up oldest
        // first, matching `expected`.
        const all2 = []
        console.time('all2')
        for await (const event of db2.iterator()) {
          all2.unshift(event)
        }
        console.timeEnd('all2')
        console.timeEnd('replicate2')
        const expected = []
        for (let i = 0; i < amount; i++) {
          expected.push('hello' + i)
        }
        deepStrictEqual(all2, expected)
        // The writer's own view must match as well.
        const all1 = []
        console.time('all1')
        for await (const event of db1.iterator()) {
          all1.unshift(event)
        }
        console.timeEnd('all1')
        deepStrictEqual(all1, expected)
      })
    })
  })
})

265
test/orbitdb.test.js Normal file
View File

@@ -0,0 +1,265 @@
import { strictEqual, notStrictEqual } from 'assert'
import rimraf from 'rimraf'
import fs from 'fs'
import path from 'path'
import { OrbitDB, isIdentity } from '../src/index.js'
// Test utils
import { config, testAPIs, startIpfs, stopIpfs } from 'orbit-db-test-utils'
import connectPeers from './utils/connect-nodes.js'
const { sync: rmrf } = rimraf
Object.keys(testAPIs).forEach((IPFS) => {
describe('OrbitDB (' + IPFS + ')', function () {
this.timeout(config.timeout)
let ipfsd1, ipfsd2
let ipfs1, ipfs2
let orbitdb1
before(async () => {
  // Start two IPFS instances and connect them as peers.
  ipfsd1 = await startIpfs(IPFS, config.daemon1)
  ipfsd2 = await startIpfs(IPFS, config.daemon2)
  ipfs1 = ipfsd1.api
  ipfs2 = ipfsd2.api
  await connectPeers(ipfs1, ipfs2)
})
after(async () => {
  // Stop whichever daemons were successfully started, in start order.
  for (const daemon of [ipfsd1, ipfsd2]) {
    if (daemon) {
      await stopIpfs(daemon)
    }
  }
})
// OrbitDB created with only an IPFS instance: every other property must
// take its documented default ('./orbitdb' directory, generated identity).
describe('OrbitDB instance creation - defaults', () => {
  before(async () => {
    rmrf('./orbitdb')
    orbitdb1 = await OrbitDB({ ipfs: ipfs1 })
  })
  after(async () => {
    await orbitdb1?.stop()
    rmrf('./orbitdb')
  })
  // Shared shape-check helpers.
  const assertIsObject = (value) => {
    notStrictEqual(value, undefined)
    strictEqual(typeof value, 'object')
  }
  const assertIsFunction = (value) => {
    notStrictEqual(value, undefined)
    strictEqual(typeof value, 'function')
  }
  it('has an IPFS instance', async () => {
    assertIsObject(orbitdb1.ipfs)
  })
  it('has the IPFS instance given as a parameter', async () => {
    const { id: expectedId } = await ipfs1.id()
    const { id: resultId } = await orbitdb1.ipfs.id()
    strictEqual(expectedId, resultId)
  })
  it('has a directory', async () => {
    notStrictEqual(orbitdb1.directory, undefined)
    strictEqual(typeof orbitdb1.directory, 'string')
  })
  it('has the directory given as a parameter', async () => {
    strictEqual(orbitdb1.directory, './orbitdb')
  })
  it('has a keystore', async () => {
    assertIsObject(orbitdb1.keystore)
  })
  it('has a keystore that contains a private key for the created identity', async () => {
    const privateKey = await orbitdb1.keystore.getKey(orbitdb1.identity.id)
    notStrictEqual(privateKey, undefined)
    strictEqual(privateKey.constructor.name, 'Secp256k1PrivateKey')
    notStrictEqual(privateKey._key, undefined)
    notStrictEqual(privateKey._publicKey, undefined)
  })
  it('has a keystore that contains a public key that matches the identity\'s public key', async () => {
    const privateKey = await orbitdb1.keystore.getKey(orbitdb1.identity.id)
    const publicKey = await orbitdb1.keystore.getPublic(privateKey)
    notStrictEqual(publicKey, undefined)
    strictEqual(typeof publicKey, 'string')
    strictEqual(publicKey, orbitdb1.identity.publicKey)
  })
  it('creates a directory for the keystore', async () => {
    strictEqual(fs.existsSync(path.join('./orbitdb/keystore')), true)
  })
  it('has an identity', async () => {
    assertIsObject(orbitdb1.identity)
  })
  it('creates a valid identity', async () => {
    strictEqual(isIdentity(orbitdb1.identity), true)
  })
  it('has a peerId', async () => {
    notStrictEqual(orbitdb1.peerId, undefined)
  })
  it('has a peerId of type Ed25519PeerIdImpl', async () => {
    strictEqual(orbitdb1.peerId.constructor.name, 'Ed25519PeerIdImpl')
  })
  it('has a peerId that matches the IPFS id', async () => {
    const { id } = await ipfs1.id()
    strictEqual(orbitdb1.peerId, id)
  })
  it('has an open function', async () => {
    assertIsFunction(orbitdb1.open)
  })
  it('has a stop function', async () => {
    assertIsFunction(orbitdb1.stop)
  })
})
// OrbitDB created with explicit id and directory: the given values must
// be used instead of the defaults.
describe('OrbitDB instance creation - user given parameters', () => {
  before(async () => {
    rmrf('./orbitdb1')
    orbitdb1 = await OrbitDB({ ipfs: ipfs1, id: 'user1', directory: './orbitdb1' })
  })
  after(async () => {
    await orbitdb1?.stop()
    rmrf('./orbitdb1')
  })
  // Shared shape-check helpers.
  const assertIsObject = (value) => {
    notStrictEqual(value, undefined)
    strictEqual(typeof value, 'object')
  }
  const assertIsFunction = (value) => {
    notStrictEqual(value, undefined)
    strictEqual(typeof value, 'function')
  }
  it('has an IPFS instance', async () => {
    assertIsObject(orbitdb1.ipfs)
  })
  it('has the IPFS instance given as a parameter', async () => {
    const { id: expectedId } = await ipfs1.id()
    const { id: resultId } = await orbitdb1.ipfs.id()
    strictEqual(expectedId, resultId)
  })
  it('has a directory', async () => {
    notStrictEqual(orbitdb1.directory, undefined)
    strictEqual(typeof orbitdb1.directory, 'string')
  })
  it('has the directory given as a parameter', async () => {
    strictEqual(orbitdb1.directory, './orbitdb1')
  })
  it('has a keystore', async () => {
    assertIsObject(orbitdb1.keystore)
  })
  it('has a keystore that contains a private key for the created identity', async () => {
    const privateKey = await orbitdb1.keystore.getKey(orbitdb1.identity.id)
    notStrictEqual(privateKey, undefined)
    strictEqual(privateKey.constructor.name, 'Secp256k1PrivateKey')
    notStrictEqual(privateKey._key, undefined)
    notStrictEqual(privateKey._publicKey, undefined)
  })
  it('has a keystore that contains a public key that matches the identity\'s public key', async () => {
    const privateKey = await orbitdb1.keystore.getKey(orbitdb1.identity.id)
    const publicKey = await orbitdb1.keystore.getPublic(privateKey)
    notStrictEqual(publicKey, undefined)
    strictEqual(typeof publicKey, 'string')
    strictEqual(publicKey, orbitdb1.identity.publicKey)
  })
  it('creates a directory for the keystore', async () => {
    strictEqual(fs.existsSync(path.join('./orbitdb1/keystore')), true)
  })
  it('has an identity', async () => {
    assertIsObject(orbitdb1.identity)
  })
  it('creates a valid identity', async () => {
    strictEqual(isIdentity(orbitdb1.identity), true)
  })
  it('has a peerId', async () => {
    notStrictEqual(orbitdb1.peerId, undefined)
  })
  it('has a peerId of type Ed25519PeerIdImpl', async () => {
    strictEqual(orbitdb1.peerId.constructor.name, 'Ed25519PeerIdImpl')
  })
  it('has a peerId that matches the IPFS id', async () => {
    const { id } = await ipfs1.id()
    strictEqual(orbitdb1.peerId, id)
  })
  it('has an open function', async () => {
    assertIsFunction(orbitdb1.open)
  })
  it('has a stop function', async () => {
    assertIsFunction(orbitdb1.stop)
  })
})
// Creating OrbitDB without an IPFS instance must fail and must leave no
// directories behind on disk.
describe('OrbitDB instance creation - errors', () => {
  after(async () => {
    await orbitdb1?.stop()
  })
  const expectedMessage = 'IPFS instance is a required argument. See https://github.com/orbitdb/orbit-db/blob/master/API.md#createinstance'
  // Attempts creation and returns the thrown error (undefined on success).
  const captureError = async (...args) => {
    let err
    try {
      orbitdb1 = await OrbitDB(...args)
    } catch (e) {
      err = e
    }
    return err
  }
  it('throws an error if given an empty parameters object', async () => {
    const err = await captureError({})
    notStrictEqual(err, undefined)
    strictEqual(err.message, expectedMessage)
  })
  it('throws an error if IPFS instance is not given', async () => {
    const err = await captureError()
    notStrictEqual(err, undefined)
    strictEqual(err.message, expectedMessage)
  })
  it('doesn\'t create the data directory when an error occurs', async () => {
    await captureError()
    strictEqual(fs.existsSync(path.join('./orbitdb')), false)
    strictEqual(fs.existsSync(path.join('./orbitdb/keystore')), false)
  })
})
})
})

View File

@@ -4,13 +4,12 @@ import rimraf from 'rimraf'
import { Log } from '../src/oplog/index.js'
import { Identities } from '../src/identities/index.js'
import KeyStore from '../src/key-store.js'
import { IPFSBlockStorage, MemoryStorage, LRUStorage, ComposedStorage, LevelStorage } from '../src/storage/index.js'
import { IPFSBlockStorage, MemoryStorage, LRUStorage, ComposedStorage } from '../src/storage/index.js'
import { copy } from 'fs-extra'
import { config, testAPIs } from 'orbit-db-test-utils'
import testKeysPath from './fixtures/test-keys-path.js '
const { sync: rmrf } = rimraf
const { createIdentity } = Identities
Object.keys(testAPIs).forEach((_) => {
describe('Storages (' + _ + ')', function () {

View File

@@ -1,85 +0,0 @@
import IdentityProvider from 'orbit-db-identity-provider/interface'
import pkg from 'elliptic'
const { ec: EC } = pkg
const ec = new EC('secp256k1')
/**
* A custom keystore example
*/
/**
 * A custom keystore example.
 *
 * Uses a single secp256k1 key pair for every request, which makes it
 * suitable only for tests/mocks — there is no per-id key management.
 */
class CustomTestKeystore {
  constructor (storage) {
    // Use just one key throughout the keystore for mock purposes.
    // NOTE: the storage argument is accepted for interface
    // compatibility but unused.
    this.key = this.createKey()
  }

  // Whether the single mock key exists.
  hasKey () {
    // Return the boolean expression directly instead of `? true : false`.
    return this.key !== undefined
  }

  // Generates the mock key pair. The id argument is ignored — every id
  // maps to the same key.
  createKey (id) {
    const key = ec.genKeyPair()
    const keyPair = {
      public: {
        marshal: () => key.getPublic('hex')
      },
      priv: key.getPrivate('hex'),
      privEnc: 'hex',
      pubEnc: 'hex'
    }
    return keyPair
  }

  // Returns the single mock key regardless of id.
  getKey (id) {
    return this.key
  }

  // Mock signing; the key and data are ignored.
  sign (key, data) {
    return Promise.resolve('<signature>')
  }

  // Mock verification; every signature is accepted.
  verify (signature, publicKey, data) {
    return Promise.resolve(true)
  }

  // Returns the hex-encoded public key of the given key pair.
  getPublic (key) {
    return key.public.marshal()
  }

  close () {}

  open () {}
}
/**
 * Identity provider backed by CustomTestKeystore; every operation is mocked.
 */
class CustomIdProvider extends IdentityProvider {
  constructor (options = {}) {
    super()
    // Fall back to the mock keystore when none is supplied.
    this._keystore = options.keystore || new CustomTestKeystore()
  }

  // Returns the type of the identity provider
  static get type () { return 'custom' }

  // Every identity created by this provider gets the same fixed id.
  async getId (options = {}) {
    return 'id'
  }

  // Delegates signing to the keystore (which returns a mock signature).
  async signIdentity (data, options = {}) {
    return this._keystore.sign(null, data)
  }

  // Verify that identity was signed by the ID; always accepted here.
  static async verifyIdentity (identity) {
    return true
  }
}
/**
 * Factory matching the keystore-module interface. Both parameters are
 * accepted for interface compatibility but unused by the mock.
 */
export default (LocalStorage, mkdir) => ({
  // The directory is ignored; every call yields a fresh mock keystore.
  create: (directory) => new CustomTestKeystore(),
  identityProvider: CustomIdProvider
})

View File

@@ -1,42 +0,0 @@
/**
 * Database type descriptors used to drive type-generic tests: how to
 * create each store, insert a value, query it, and what value to expect.
 */
export default [
  // Append-only event log; reads back the inserted payload.
  {
    type: 'eventlog',
    create: (orbitdb, name, options) => orbitdb.eventlog(name, options),
    tryInsert: (db) => db.add('hello'),
    query: (db) => db.iterator({ limit: -1 }).collect(),
    getTestValue: (db) => db.iterator({ limit: -1 }).collect()[0].payload.value,
    expectedValue: 'hello'
  },
  // Feed store; same access pattern as the event log.
  {
    type: 'feed',
    create: (orbitdb, name, options) => orbitdb.feed(name, options),
    tryInsert: (db) => db.add('hello'),
    query: (db) => db.iterator({ limit: -1 }).collect(),
    getTestValue: (db) => db.iterator({ limit: -1 }).collect()[0].payload.value,
    expectedValue: 'hello'
  },
  // Key-value store; sets and gets a single key.
  {
    type: 'key-value',
    create: (orbitdb, name, options) => orbitdb.kvstore(name, options),
    tryInsert: (db) => db.set('one', 'hello'),
    query: (db) => [],
    getTestValue: (db) => db.get('one'),
    expectedValue: 'hello'
  },
  // Document store; get() returns an array of matching documents.
  {
    type: 'documents',
    create: (orbitdb, name, options) => orbitdb.docstore(name, options),
    tryInsert: (db) => db.put({ _id: 'hello world', doc: 'all the things' }),
    query: (db) => [],
    getTestValue: (db) => db.get('hello world'),
    expectedValue: [{ _id: 'hello world', doc: 'all the things' }]
  },
  // Counter store; the value is the sum of increments.
  {
    type: 'counter',
    create: (orbitdb, name, options) => orbitdb.counter(name, options),
    tryInsert: (db) => db.inc(8),
    query: (db) => [],
    getTestValue: (db) => db.value,
    expectedValue: 8
  }
]

View File

@@ -1,2 +0,0 @@
export { default as CustomTestKeystore } from './custom-test-keystore.js'
export { default as databases } from './databases.js'

View File

@@ -1,7 +1,7 @@
const waitFor = async (valueA, toBeValueB, pollInterval = 100) => {
return new Promise((resolve) => {
const interval = setInterval(() => {
if (valueA() === toBeValueB()) {
const interval = setInterval(async () => {
if (await valueA() === await toBeValueB()) {
clearInterval(interval)
resolve()
}

View File

@@ -23,7 +23,6 @@
// fs.mkdirSync(directory, { recursive: true })
// }
// const dbPath = path.join('./orbitdb', 'tests', 'v0')
// const dbFixturesDir = path.join('./test', 'fixtures', 'v0', 'QmWDUfC4zcWJGgc9UHn1X3qQ5KZqBv4KCiCtjnpMmBT8JC', 'v0-db')
// const keyFixtures = path.join('./test', 'fixtures', 'keys','QmRfPsKJs9YqTot5krRibra4gPwoK4kghhU8iKWxBjGDDX')
@@ -44,7 +43,6 @@
// ipfs = ipfsd.api
// rmrf.sync(dbPath)
// const zip = new Zip(ipfsFixtures)
// await zip.extractAllToAsync(path.join('./test', 'fixtures'), true)