Merge pull request #495 from orbitdb/feat/new-acs

New Access Controller system
This commit is contained in:
Mark Robert Henderson 2019-04-01 18:34:49 +00:00 committed by GitHub
commit ee9764484d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
65 changed files with 4300 additions and 4524 deletions

View File

@ -41,37 +41,38 @@ jobs:
# This is based on your 1.0 configuration file or project settings
- run:
working_directory: ~/orbitdb/orbit-db
command: nvm install 8.2.0 && nvm alias default 8.2.0
command: nvm install 10.13.0 && nvm alias default 10.13.0
# Dependencies
# This would typically go in either a build or a build-and-test job when using workflows
# Restore the dependency cache
- restore_cache:
keys:
# This branch if available
- v1-dep-{{ .Branch }}-
# Default branch if not
- v1-dep-master-
# Any branch if there are none on the default branch - this should be unnecessary if you have your default branch configured correctly
- v1-dep-
# - restore_cache:
# keys:
# # This branch if available
# - v1-dep-{{ .Branch }}-
# # Default branch if not
# - v1-dep-master-
# # Any branch if there are none on the default branch - this should be unnecessary if you have your default branch configured correctly
# - v1-dep-
# The following line was run implicitly in your 1.0 builds based on what CircleCI inferred about the structure of your project. In 2.0 you need to be explicit about which commands should be run. In some cases you can discard inferred commands if they are not relevant to your project.
- run: if [ -z "${NODE_ENV:-}" ]; then export NODE_ENV=test; fi
- run: export PATH="~/orbitdb/orbit-db/node_modules/.bin:$PATH"
- run: rm -rf node_modules/
- run: npm install
# Save dependency cache
- save_cache:
key: v1-dep-{{ .Branch }}-{{ epoch }}
paths:
# This is a broad list of cache paths to include many possible development environments
# You can probably delete some of these entries
- vendor/bundle
- ~/virtualenvs
- ~/.m2
- ~/.ivy2
- ~/.bundle
- ~/.go_workspace
- ~/.gradle
- ~/.cache/bower
- ./node_modules
# - save_cache:
# key: v1-dep-{{ .Branch }}-{{ epoch }}
# paths:
# # This is a broad list of cache paths to include many possible development environments
# # You can probably delete some of these entries
# - vendor/bundle
# - ~/virtualenvs
# - ~/.m2
# - ~/.ivy2
# - ~/.bundle
# - ~/.go_workspace
# - ~/.gradle
# - ~/.cache/bower
# - ./node_modules
# Test
# This would typically be a build job when using workflows, possibly combined with build
# The following line was run implicitly in your 1.0 builds based on what CircleCI inferred about the structure of your project. In 2.0 you need to be explicit about which commands should be run. In some cases you can discard inferred commands if they are not relevant to your project.

View File

@ -1,6 +1,6 @@
'use strict'
const IPFS = require('ipfs-api')
const IPFS = require('ipfs-http-client')
const OrbitDB = require('../src/OrbitDB')
// Metrics
@ -26,8 +26,8 @@ const ipfs = IPFS('127.0.0.1')
const run = async () => {
try {
const orbit = new OrbitDB(ipfs, './orbitdb/benchmarks')
const db = await orbit.eventlog('orbit-db.benchmark', {
const orbit = await OrbitDB.createInstance(ipfs, { directory: './orbitdb/benchmarks' })
const db = await orbit.eventlog('orbit-db.benchmark', {
replicate: false,
})

View File

@ -44,8 +44,8 @@ ipfs.on('error', (err) => console.error(err))
ipfs.on('ready', async () => {
const run = async () => {
try {
const orbit = new OrbitDB(ipfs, './orbitdb/benchmarks')
const db = await orbit.eventlog('orbit-db.benchmark', {
const orbit = await OrbitDB.createInstance(ipfs,{ directory: './orbitdb/benchmarks' })
const db = await orbit.eventlog('orbit-db.benchmark', {
replicate: false,
})

View File

@ -46,8 +46,8 @@ const defaultConfig = Object.assign({}, {
config: ipfsConf
})
const conf1 = Object.assign({}, defaultConfig, {
repo: new IPFSRepo('./orbitdb/benchmarks/replication/client1/ipfs', repoConf)
const conf1 = Object.assign({}, defaultConfig, {
repo: new IPFSRepo('./orbitdb/benchmarks/replication/client1/ipfs', repoConf)
})
// Write loop
@ -74,7 +74,7 @@ const outputMetrics = (name, db, metrics) => {
if(metrics.seconds % 10 === 0) {
console.log(`[${name}] --> Average of ${metrics.lastTenSeconds/10} q/s in the last 10 seconds`)
metrics.lastTenSeconds = 0
}
}
}
const database = 'benchmark-replication'
@ -87,7 +87,7 @@ pMapSeries([conf1,], d => startIpfs('js-ipfs', d))
.then(async ([ipfs1]) => {
try {
// Create the databases
const orbit1 = new OrbitDB(ipfs1.api, './orbitdb/benchmarks/replication/client1')
const orbit1 = await OrbitDB.createInstance(ipfs1.api, { directory: './orbitdb/benchmarks/replication/client1' })
const db1 = await orbit1.eventlog(database, { overwrite: true })
console.log(db1.address.toString())

View File

@ -46,8 +46,8 @@ const defaultConfig = Object.assign({}, {
config: ipfsConf
})
const conf2 = Object.assign({}, defaultConfig, {
repo: new IPFSRepo('./orbitdb/benchmarks/replication/client22faf/ipfs', repoConf)
const conf2 = Object.assign({}, defaultConfig, {
repo: new IPFSRepo('./orbitdb/benchmarks/replication/client22faf/ipfs', repoConf)
})
// Metrics output function
@ -59,7 +59,7 @@ const outputMetrics = (name, db, metrics) => {
if(metrics.seconds % 10 === 0) {
console.log(`[${name}] --> Average of ${metrics.lastTenSeconds/10} q/s in the last 10 seconds`)
metrics.lastTenSeconds = 0
}
}
}
const database = 'benchmark-replication'
@ -72,7 +72,7 @@ pMapSeries([conf2], d => startIpfs('js-ipfs', d))
.then(async ([ipfs2]) => {
try {
// Create the databases
const orbit2 = new OrbitDB(ipfs2.api, './orbitdb/benchmarks/replication/client2')
const orbit2 = await OrbitDB.createInstance(ipfs2.api, { directory: './orbitdb/benchmarks/replication/client2' })
const address = process.argv[2]
const db2 = await orbit2.eventlog(address)

View File

@ -32,7 +32,7 @@
}
let run = (() => {
ipfs = new Ipfs({
ipfs = new Ipfs({
repo: '/orbitdb/benchmarks/browser/benchmark-add/0.27.0',
start: false,
EXPERIMENTAL: {
@ -47,8 +47,8 @@
ipfs.on('ready', async () => {
const outputElm = document.getElementById('output')
try {
const orbit = new OrbitDB(ipfs, './orbitdb/benchmarks/browser')
const db = await orbit.eventlog('orbit-db.benchmark.add', {
const orbit = await OrbitDB.createInstance(ipfs, { directory: './orbitdb/benchmarks/browser' })
const db = await orbit.eventlog('orbit-db.benchmark.add', {
replicate: false,
})

View File

@ -53,7 +53,7 @@
config: ipfsConf
})
const conf1 = Object.assign({}, defaultConfig, {
const conf1 = Object.assign({}, defaultConfig, {
repo: './orbitdb/benchmarks/2replication3/client2/ipfs'
})
@ -77,7 +77,7 @@
if(metrics.seconds % 10 === 0) {
console.log(`[${name}] --> Average of ${metrics.lastTenSeconds/10} q/s in the last 10 seconds`)
metrics.lastTenSeconds = 0
}
}
}
const startIpfs = (config = {}) => {
@ -98,7 +98,7 @@
let ipfs1 = await startIpfs(conf1)
try {
// Create the databases
const orbit1 = new OrbitDB(ipfs1, './orbitdb/benchmarks/replication/client1')
const orbit1 = await OrbitDB.createInstance(ipfs1, { directory: './orbitdb/benchmarks/replication/client1' })
const db1 = await orbit1.eventlog(database, { overwrite: true, sync: false })
console.log("Database address is:", db1.address.toString())

View File

@ -60,11 +60,11 @@
config: ipfsConf
})
const conf1 = Object.assign({}, defaultConfig, {
const conf1 = Object.assign({}, defaultConfig, {
repo: './orbitdb/benchmarks/2replication3/client1/ipfs'
})
const conf2 = Object.assign({}, defaultConfig, {
const conf2 = Object.assign({}, defaultConfig, {
repo: './orbitdb/benchmarks/2replication3/client2/ipfs'
})
@ -88,7 +88,7 @@
if(metrics.seconds % 10 === 0) {
console.log(`[${name}] --> Average of ${metrics.lastTenSeconds/10} q/s in the last 10 seconds`)
metrics.lastTenSeconds = 0
}
}
}
const startIpfs = (config = {}) => {
@ -109,8 +109,8 @@
let ipfs2 = await startIpfs(conf2)
try {
// Create the databases
const address = '/orbitdb/QmcPCAwwV1rw7cLQU7VcCaUXEuLYSCH8uUf6NPDLYbL6JT/benchmark-replication'
const orbit2 = new OrbitDB(ipfs2, './orbitdb/benchmarks/replication/client3')
const address = '/orbitdb/zdpuArNFJaH4Fk5dHH4rYSCtmbyaJLNHUyeFX4WszFxEqwJTE/benchmark-replication'
const orbit2 = await OrbitDB.createInstance(ipfs2, { directory: './orbitdb/benchmarks/replication/client3' })
const db2 = await orbit2.eventlog(address)
db2.events.on('peer', peer => console.log("PEER2!", peer))

View File

@ -2,14 +2,13 @@
const path = require('path')
const webpack = require('webpack')
const Uglify = require('uglifyjs-webpack-plugin')
module.exports = {
entry: './src/OrbitDB.js',
output: {
libraryTarget: 'var',
library: 'OrbitDB',
filename: './dist/orbitdb.min.js'
filename: '../dist/orbitdb.min.js'
},
target: 'web',
devtool: 'none',
@ -26,8 +25,7 @@ module.exports = {
'process.env': {
'NODE_ENV': JSON.stringify(process.env.NODE_ENV)
}
}),
new Uglify(),
})
],
resolve: {
modules: [

View File

@ -1,14 +1,13 @@
'use strict'
const path = require('path')
const Uglify = require('uglifyjs-webpack-plugin')
module.exports = {
entry: './src/OrbitDB.js',
output: {
libraryTarget: 'var',
library: 'OrbitDB',
filename: './dist/orbitdb.js'
filename: '../dist/orbitdb.js'
},
target: 'web',
devtool: 'source-map',

View File

@ -2,19 +2,11 @@
const path = require('path')
const webpack = require('webpack')
const Uglify = require('uglifyjs-webpack-plugin')
const uglifyOptions = {
uglifyOptions: {
mangle: false,
compress: false,
},
}
module.exports = {
entry: './examples/browser/browser-webpack-example/index.js',
output: {
filename: './examples/browser/browser-webpack-example/bundle.js'
filename: '../examples/browser/browser-webpack-example/bundle.js'
},
target: 'web',
devtool: 'none',
@ -26,8 +18,7 @@ module.exports = {
'process.env': {
'NODE_ENV': JSON.stringify(process.env.NODE_ENV)
}
}),
new Uglify(uglifyOptions),
})
],
resolve: {
modules: [

View File

@ -51,8 +51,8 @@
</div>
<div id="writerText"></div>
<script type="text/javascript" src="lib/orbitdb.min.js" charset="utf-8"></script>
<script type="text/javascript" src="lib/ipfs.js" charset="utf-8"></script>
<script type="text/javascript" src="../../dist/orbitdb.js" charset="utf-8"></script>
<script type="text/javascript" src="../../node_modules/ipfs/dist/index.js" charset="utf-8"></script>
<script type="text/javascript" src="example.js" charset="utf-8"></script>
<script type="text/javascript" charset="utf-8">
// Start the example

View File

@ -46,8 +46,11 @@ const main = (IPFS, ORBITDB) => {
// Create IPFS instance
const ipfs = new Ipfs({
repo: '/orbitdb/examples/browser/new/ipfs/0.27.3',
repo: '/orbitdb/examples/browser/new/ipfs/0.33.1',
start: true,
preload: {
enabled: false
},
EXPERIMENTAL: {
pubsub: true,
},
@ -65,11 +68,11 @@ const main = (IPFS, ORBITDB) => {
})
ipfs.on('error', (e) => handleError(e))
ipfs.on('ready', () => {
ipfs.on('ready', async () => {
openButton.disabled = false
createButton.disabled = false
statusElm.innerHTML = "IPFS Started"
orbitdb = new OrbitDB(ipfs)
orbitdb = await OrbitDB.createInstance(ipfs)
})
const load = async (db, statusText) => {
@ -89,8 +92,8 @@ const main = (IPFS, ORBITDB) => {
let maxTotal = 0, loaded = 0
db.events.on('load.progress', (address, hash, entry, progress, total) => {
loaded ++
maxTotal = Math.max.apply(null, [progress, maxTotal, progress, 0])
total = Math.max.apply(null, [progress, maxTotal, total, 0])
maxTotal = Math.max.apply(null, [maxTotal, progress, 0])
total = Math.max.apply(null, [progress, maxTotal, total, entry.clock.time, 0])
statusElm.innerHTML = `Loading database... ${maxTotal} / ${total}`
})
@ -115,10 +118,8 @@ const main = (IPFS, ORBITDB) => {
await update(db)
} catch (e) {
console.error(e.toString())
if (e.toString() === 'Error: Not allowed to write') {
writerText.innerHTML = '<span style="color: red">' + e.toString() + '</span>'
clearInterval(updateInterval)
}
writerText.innerHTML = '<span style="color: red">' + e.toString() + '</span>'
clearInterval(updateInterval)
}
}, interval)
}
@ -158,7 +159,9 @@ const main = (IPFS, ORBITDB) => {
type: type,
// If "Public" flag is set, allow anyone to write to the database,
// otherwise only the creator of the database can write
write: publicAccess ? ['*'] : [],
accessController: {
write: publicAccess ? ['*'] : [orbitdb.identity.publicKey],
}
})
await load(db, 'Creating database...')
@ -255,7 +258,7 @@ const main = (IPFS, ORBITDB) => {
outputElm.innerHTML = `
<div><b>Peer ID:</b> ${orbitdb.id}</div>
<div><b>Peers (database/network):</b> ${databasePeers.length} / ${networkPeers.length}</div>
<div><b>Oplog Size:</b> ${db._replicationStatus.progress} / ${db._replicationStatus.max}</div>
<div><b>Oplog Size:</b> ${Math.max(db._replicationStatus.progress, db._oplog.length)} / ${db._replicationStatus.max}</div>
<h2>Results</h2>
<div id="results">
<div>

View File

@ -21,8 +21,11 @@ ipfs.on('ready', async () => {
let db
try {
const orbitdb = new OrbitDB(ipfs, './orbitdb/examples/eventlog')
const orbitdb = await OrbitDB.createInstance(ipfs, {
directory: './orbitdb/examples/eventlog'
})
db = await orbitdb.eventlog('example', { overwrite: true })
await db.load()
} catch (e) {
console.error(e)
process.exit(1)
@ -40,7 +43,7 @@ ipfs.on('ready', async () => {
output += `--------------------\n`
output += `ID | Visitor\n`
output += `--------------------\n`
output += latest.reverse().map((e) => e.payload.value.userId + ' | ' + e.payload.value.avatar + ')').join('\n') + `\n`
output += latest.reverse().map((e) => e.payload.value.userId + ' | ' + e.payload.value.avatar).join('\n') + `\n`
console.log(output)
} catch (e) {
console.error(e)

View File

@ -36,7 +36,9 @@ ipfs.on('error', (err) => console.error(err))
ipfs.on('ready', async () => {
let db
try {
const orbitdb = new OrbitDB(ipfs, './orbitdb/examples/eventlog')
const orbitdb = await OrbitDB.createInstance(ipfs, {
directory: './orbitdb/examples/keyvalue'
})
db = await orbitdb.kvstore('example', { overwrite: true })
await db.load()
// Query immediately after loading

7272
package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@ -1,6 +1,6 @@
{
"name": "orbit-db",
"version": "0.19.9",
"version": "0.20.0-rc.1",
"description": "Distributed p2p database on IPFS",
"author": "Haad",
"license": "MIT",
@ -9,45 +9,53 @@
"url": "https://github.com/orbitdb/orbit-db"
},
"engines": {
"node": ">=8.0.0"
"node": ">=10.0.0"
},
"main": "src/OrbitDB.js",
"dependencies": {
"cids": "^0.5.7",
"ipfs-pubsub-1on1": "~0.0.4",
"ipld-dag-pb": "0.14.11",
"localstorage-down": "^0.6.7",
"logplease": "^1.2.14",
"multihashes": "^0.4.12",
"orbit-db-access-controllers": "rc1",
"orbit-db-cache": "~0.2.4",
"orbit-db-counterstore": "~1.4.0",
"orbit-db-docstore": "~1.4.3",
"orbit-db-eventstore": "~1.4.0",
"orbit-db-feedstore": "~1.4.0",
"orbit-db-keystore": "~0.1.0",
"orbit-db-kvstore": "~1.4.0",
"orbit-db-counterstore": "rc1",
"orbit-db-docstore": "rc1",
"orbit-db-eventstore": "rc1",
"orbit-db-feedstore": "rc1",
"orbit-db-identity-provider": "rc1",
"orbit-db-io": "~0.0.1",
"orbit-db-keystore": "rc1",
"orbit-db-kvstore": "rc1",
"orbit-db-pubsub": "~0.5.5",
"orbit-db-store": "~2.5.3"
"orbit-db-store": "rc1"
},
"devDependencies": {
"babel-core": "^6.26.0",
"babel-loader": "^7.1.2",
"babel-plugin-transform-runtime": "^6.23.0",
"babel-polyfill": "^6.26.0",
"babel-preset-es2015": "^6.24.1",
"datastore-level": "~0.8.0",
"go-ipfs-dep": "0.4.13",
"ipfs": "~0.30.0",
"ipfs-repo": "~0.22.1",
"ipfsd-ctl": "^0.37.5",
"babel-preset-env": "^1.7.0",
"datastore-level": "~0.10.0",
"fs-extra": "^7.0.1",
"go-ipfs-dep": "^0.4.18",
"ipfs": "^0.34.4",
"ipfs-repo": "^0.26.2",
"ipfsd-ctl": "^0.42.0",
"localstorage-level-migration": "^0.0.1",
"markdown-toc": "^1.2.0",
"mocha": "^4.0.1",
"mocha": "^5.2.0",
"p-each-series": "^1.0.0",
"p-map": "^1.2.0",
"p-map-series": "^1.0.0",
"p-whilst": "^1.0.0",
"pify": "^4.0.1",
"remark-cli": "^5.0.0",
"remark-validate-links": "^7.0.0",
"rimraf": "^2.6.2",
"uglifyjs-webpack-plugin": "^1.1.4",
"webpack": "^3.8.1"
"webpack": "^4.25.1",
"webpack-cli": "^3.1.2"
},
"scripts": {
"examples": "npm run examples:node",
@ -61,6 +69,6 @@
"build:dist": "webpack --config conf/webpack.config.js --sort-modules-by size && mkdir -p examples/browser/lib && cp dist/orbitdb.min.js examples/browser/lib/orbitdb.min.js",
"build:debug": "webpack --config conf/webpack.debug.config.js --sort-modules-by size && mkdir -p examples/browser/lib && cp dist/orbitdb.js examples/browser/lib/orbitdb.js && cp dist/orbitdb.js.map examples/browser/lib/orbitdb.js.map",
"build:docs/toc": "markdown-toc --no-first1 -i README.md && markdown-toc --no-first1 -i API.md && markdown-toc --no-first1 -i GUIDE.md && markdown-toc --no-first1 -i CHANGELOG.md && markdown-toc --no-first1 -i FAQ.md ",
"build:es5": "babel src --out-dir ./dist/es5/ --presets babel-preset-es2015 --plugins babel-plugin-transform-runtime"
"build:es5": "babel src --out-dir ./dist/es5/ --presets babel-preset-env --plugins babel-plugin-transform-runtime"
}
}

View File

@ -9,10 +9,12 @@ const DocumentStore = require('orbit-db-docstore')
const Pubsub = require('orbit-db-pubsub')
const Cache = require('orbit-db-cache')
const Keystore = require('orbit-db-keystore')
const AccessController = require('./ipfs-access-controller')
const Identities = require('orbit-db-identity-provider')
let AccessControllers = require('orbit-db-access-controllers')
const OrbitDBAddress = require('./orbit-db-address')
const createDBManifest = require('./db-manifest')
const exchangeHeads = require('./exchange-heads')
const { isDefined, io } = require('./utils')
const Logger = require('logplease')
const logger = Logger.create("orbit-db")
@ -27,19 +29,49 @@ let databaseTypes = {
'keyvalue': KeyValueStore,
}
class OrbitDB {
constructor(ipfs, directory, options = {}) {
class OrbitDB {
constructor(ipfs, identity, options = {}) {
if (!isDefined(ipfs))
throw new Error('IPFS is a required argument. See https://github.com/orbitdb/orbit-db/blob/master/API.md#createinstance')
if (!isDefined(identity))
throw new Error('identity is a required argument. See https://github.com/orbitdb/orbit-db/blob/master/API.md#createinstance')
this._ipfs = ipfs
this.id = options.peerId || (this._ipfs._peerInfo ? this._ipfs._peerInfo.id._idB58String : 'default')
this.identity = identity
this.id = options.peerId
this._pubsub = options && options.broker
? new options.broker(this._ipfs)
: new Pubsub(this._ipfs, this.id)
this.stores = {}
this.directory = directory || './orbitdb'
this.keystore = options.keystore || Keystore.create(path.join(this.directory, this.id, '/keystore'))
this.directory = options.directory || './orbitdb'
this.keystore = options.keystore
this.cache = options.cache || Cache
this.key = this.keystore.getKey(this.id) || this.keystore.createKey(this.id)
this.stores = {}
this._directConnections = {}
// AccessControllers module can be passed in to enable
// testing with orbit-db-access-controller
AccessControllers = options.AccessControllers || AccessControllers
}
static async createInstance (ipfs, options = {}) {
if (!isDefined(ipfs))
throw new Error('IPFS is a required argument. See https://github.com/orbitdb/orbit-db/blob/master/API.md#createinstance')
const { id } = await ipfs.id()
const directory = options.directory || './orbitdb'
const keystore = options.keystore || Keystore.create(path.join(directory, id, '/keystore'))
const identity = options.identity || await Identities.createIdentity({
id: options.id || id,
keystore: keystore,
})
options = Object.assign({}, options, {
peerId: id ,
directory: directory,
keystore: keystore
})
const orbitdb = new OrbitDB(ipfs, identity, options)
return orbitdb
}
/* Databases */
@ -48,7 +80,7 @@ class OrbitDB {
return this.open(address, options)
}
async log (address, options) {
async log (address, options = {}) {
options = Object.assign({ create: true, type: 'eventlog' }, options)
return this.open(address, options)
}
@ -57,12 +89,12 @@ class OrbitDB {
return this.log(address, options)
}
async keyvalue (address, options) {
async keyvalue (address, options = {}) {
options = Object.assign({ create: true, type: 'keyvalue' }, options)
return this.open(address, options)
}
async kvstore (address, options) {
async kvstore (address, options = {}) {
return this.keyvalue(address, options)
}
@ -81,6 +113,10 @@ class OrbitDB {
}
async disconnect () {
//close Keystore
if (this.keystore.close)
await this.keystore.close()
// Close all open databases
const databases = Object.values(this.stores)
for (let db of databases) {
@ -121,8 +157,7 @@ class OrbitDB {
let accessController
if (options.accessControllerAddress) {
accessController = new AccessController(this._ipfs)
await accessController.load(options.accessControllerAddress)
accessController = await AccessControllers.resolve(this, options.accessControllerAddress, options.accessController)
}
const cache = await this._loadCache(this.directory, address)
@ -134,9 +169,8 @@ class OrbitDB {
onClose: this._onClose.bind(this),
})
const store = new Store(this._ipfs, this.id, address, opts)
const store = new Store(this._ipfs, this.identity, address, opts)
store.events.on('write', this._onWrite.bind(this))
// ID of the store is the address as a string
const addr = address.toString()
this.stores[addr] = store
@ -211,25 +245,9 @@ class OrbitDB {
if (OrbitDBAddress.isValid(name))
throw new Error(`Given database name is an address. Please give only the name of the database!`)
// Create an AccessController
const accessController = new AccessController(this._ipfs)
/* Disabled temporarily until we do something with the admin keys */
// Add admins of the database to the access controller
// if (options && options.admin) {
// options.admin.forEach(e => accessController.add('admin', e))
// } else {
// // Default is to add ourselves as the admin of the database
// accessController.add('admin', this.key.getPublic('hex'))
// }
// Add keys that can write to the database
if (options && options.write && options.write.length > 0) {
options.write.forEach(e => accessController.add('write', e))
} else {
// Default is to add ourselves as the admin of the database
accessController.add('write', this.key.getPublic('hex'))
}
// Save the Access Controller in IPFS
const accessControllerAddress = await accessController.save(onlyHash)
// Create an AccessController, use IPFS AC as the default
options.accessController = Object.assign({}, { type: 'ipfs' }, options.accessController)
const accessControllerAddress = await AccessControllers.create(this, options.accessController.type, options.accessController || {})
// Save the manifest to IPFS
const manifestHash = await createDBManifest(this._ipfs, name, type, accessControllerAddress, onlyHash)
@ -242,8 +260,7 @@ class OrbitDB {
/*
options = {
admin: [], // array of keys that are the admins of this database (same as write access)
write: [], // array of keys that can write to this database
accessController: { write: [] } // array of keys that can write to this database
directory: './orbitdb', // directory in which to place the database files
overwrite: false, // whether we should overwrite the existing database if it exists
}
@ -268,7 +285,7 @@ class OrbitDB {
throw new Error(`Database '${dbAddress}' already exists!`)
// Save the database locally
await this._saveDBManifest(directory, dbAddress)
await this._addManifestToCache(directory, dbAddress)
logger.debug(`Created database '${dbAddress}'`)
@ -291,6 +308,7 @@ class OrbitDB {
*/
async open (address, options = {}) {
logger.debug(`open()`)
options = Object.assign({ localOnly: false, create: false }, options)
logger.debug(`Open database '${address}'`)
@ -319,6 +337,7 @@ class OrbitDB {
// Check if we have the database
const haveDB = await this._haveLocalData(cache, dbAddress)
logger.debug((haveDB ? 'Found' : 'Didn\'t find') + ` database '${dbAddress}'`)
// If we want to try and open the database local-only, throw an error
@ -331,8 +350,7 @@ class OrbitDB {
logger.debug(`Loading Manifest for '${dbAddress}'`)
// Get the database manifest from IPFS
const dag = await this._ipfs.object.get(dbAddress.root)
const manifest = JSON.parse(dag.toJSON().data)
const manifest = await io.read(this._ipfs, dbAddress.root)
logger.debug(`Manifest for '${dbAddress}':\n${JSON.stringify(manifest, null, 2)}`)
// Make sure the type from the manifest matches the type that was given as an option
@ -340,7 +358,7 @@ class OrbitDB {
throw new Error(`Database '${dbAddress}' is type '${manifest.type}' but was opened as '${options.type}'`)
// Save the database locally
await this._saveDBManifest(directory, dbAddress)
await this._addManifestToCache(directory, dbAddress)
// Open the the database
options = Object.assign({}, options, { accessControllerAddress: manifest.accessController })
@ -348,13 +366,12 @@ class OrbitDB {
}
// Save the database locally
async _saveDBManifest (directory, dbAddress) {
async _addManifestToCache (directory, dbAddress) {
const cache = await this._loadCache(directory, dbAddress)
await cache.set(path.join(dbAddress.toString(), '_manifest'), dbAddress.root)
logger.debug(`Saved manifest to IPFS as '${dbAddress.root}'`)
}
// Loads the locally saved database information (manifest, head hashes)
async _loadCache (directory, dbAddress) {
let cache
try {

View File

@ -1,85 +0,0 @@
'use strict'
/**
 * In-memory access-control list with three capability levels:
 * admin, write and read. This is the base class — `load`/`save`
 * are async no-ops meant to be overridden by persistent
 * implementations (e.g. the IPFS-backed controller).
 */
class AccessController {
  constructor () {
    this._access = {
      admin: [],
      write: [],
      read: [], // Not used atm
    }
  }

  /* Overridable functions */
  async load (address) {}
  async save () {}

  /* Properties */
  get admin () {
    return this._access.admin
  }

  get write () {
    // Both admins and write keys can write
    return this._access.write.concat(this._access.admin)
  }

  // Not used atm
  get read () {
    return this._access.read
  }

  /* Public Methods */

  /**
   * Grant `key` the given access level ('admin' | 'write' | 'read').
   * Unknown access levels are ignored (same silent no-op as before).
   * Keys are kept unique per level — adding the same key twice is a
   * no-op (implements the original "TODO: uniques only").
   */
  add (access, key) {
    const keys = this._access[access]
    // Array.isArray guards both unknown levels and prototype
    // properties like 'constructor' on the plain `_access` object
    if (!Array.isArray(keys))
      return
    if (!keys.includes(key))
      keys.push(key)
  }

  /**
   * Revoke the given access level from `key`.
   * Unknown access levels are ignored.
   */
  remove (access, key) {
    // Copy of `arr` with every occurrence of `e` removed.
    // Fixes original bug: the helper declared a parameter `e` but
    // compared against the outer `key` from the closure instead.
    const without = (arr, e) => arr.filter(val => val !== e)

    if (!Array.isArray(this._access[access]))
      return
    this._access[access] = without(this._access[access], key)
  }
}
module.exports = AccessController

View File

@ -1,5 +1,5 @@
const path = require('path')
const { DAGNode } = require('ipld-dag-pb')
const io = require('orbit-db-io')
// Creates a DB manifest file and saves it in IPFS
const createDBManifest = async (ipfs, name, type, accessControllerAddress, onlyHash) => {
@ -8,21 +8,8 @@ const createDBManifest = async (ipfs, name, type, accessControllerAddress, onlyH
type: type,
accessController: path.join('/ipfs', accessControllerAddress),
}
let dag
const manifestJSON = JSON.stringify(manifest)
if (onlyHash) {
dag = await new Promise(resolve => {
DAGNode.create(Buffer.from(manifestJSON), (err, n) => {
if (err) {
throw err
}
resolve(n)
})
})
} else {
dag = await ipfs.object.put(Buffer.from(manifestJSON))
}
return dag.toJSON().multihash.toString()
return io.write(ipfs, 'dag-cbor', manifest, { onlyHash })
}
module.exports = createDBManifest

View File

@ -1,53 +0,0 @@
'use strict'
const AccessController = require('./access-controller')
const { DAGNode } = require('ipld-dag-pb')
/**
 * Access controller whose ACL is persisted as a JSON object in IPFS.
 * `load(address)` reads the lists from an IPFS object hash and
 * `save(onlyHash)` serializes them back, returning the multihash.
 */
class IPFSAccessController extends AccessController {
  constructor (ipfs) {
    super()
    this._ipfs = ipfs
  }

  /**
   * Load the access lists from IPFS.
   * @param {string} address Bare multihash or an '/ipfs/<hash>' path.
   *   Errors are logged and the current (default) lists are kept.
   */
  async load (address) {
    // Transform '/ipfs/QmPFtHi3cmfZerxtH9ySLdzpg1yFhocYDZgEZywdUXHxFU'
    // to 'QmPFtHi3cmfZerxtH9ySLdzpg1yFhocYDZgEZywdUXHxFU'
    if (address.indexOf('/ipfs') === 0)
      address = address.split('/')[2]

    try {
      const dag = await this._ipfs.object.get(address)
      const obj = JSON.parse(dag.toJSON().data)
      this._access = obj
    } catch (e) {
      // Best-effort: log and fall back to the in-memory defaults
      console.log("ACCESS ERROR:", e)
    }
  }

  /**
   * Persist the access lists.
   * @param {boolean} onlyHash When true, only compute the hash
   *   (via a local DAGNode) without putting the object into IPFS.
   * @returns {string|undefined} The multihash, or undefined on error.
   */
  async save (onlyHash) {
    let hash
    try {
      const access = JSON.stringify(this._access, null, 2)
      let dag
      if (onlyHash) {
        dag = await new Promise((resolve, reject) => {
          DAGNode.create(Buffer.from(access), (err, n) => {
            if (err) {
              // Reject instead of throwing inside the callback —
              // a throw here would be an uncaught error, not a
              // rejection of this Promise (fixes original bug)
              reject(err)
              return
            }
            resolve(n)
          })
        })
      } else {
        // Buffer.from() instead of the deprecated, unsafe `new Buffer()`
        dag = await this._ipfs.object.put(Buffer.from(access))
      }
      hash = dag.toJSON().multihash.toString()
    } catch (e) {
      console.log("ACCESS ERROR:", e)
    }
    return hash
  }
}
module.exports = IPFSAccessController

View File

@ -1,7 +1,9 @@
'use strict'
const path = require('path')
const multihash = require('multihashes')
const CID = require('cids')
const notEmpty = e => e !== '' && e !== ' '
class OrbitDBAddress {
constructor (root, path) {
@ -14,14 +16,19 @@ class OrbitDBAddress {
}
static isValid (address) {
const containsProtocolPrefix = (e, i) => !((i === 0 || i === 1) && address.toString().indexOf('/orbit') === 0 && e === 'orbitdb')
const parts = address.toString()
.split('/')
.filter((e, i) => !((i === 0 || i === 1) && address.toString().indexOf('/orbit') === 0 && e === 'orbitdb'))
.filter(e => e !== '' && e !== ' ')
.filter(containsProtocolPrefix)
.filter(notEmpty)
let accessControllerHash
const accessControllerHash = parts[0].indexOf('Qm') > -1 ? multihash.fromB58String(parts[0]) : null
try {
multihash.validate(accessControllerHash)
accessControllerHash = (parts[0].indexOf('zd') > -1 || parts[0].indexOf('Qm') > -1)
? new CID(parts[0]).toBaseEncodedString()
: null
} catch (e) {
return false
}
@ -30,7 +37,7 @@ class OrbitDBAddress {
}
static parse (address) {
if (!address)
if (!address)
throw new Error(`Not a valid OrbitDB address: ${address}`)
if (!OrbitDBAddress.isValid(address))

9
src/utils/index.js Normal file
View File

@ -0,0 +1,9 @@
'use strict'
const isDefined = require('./is-defined')
const io = require('orbit-db-io')
module.exports = {
isDefined,
io
}

5
src/utils/is-defined.js Normal file
View File

@ -0,0 +1,5 @@
'use strict'

// True for every value except `undefined` and `null`;
// other falsy values (0, '', false, NaN) count as defined.
function isDefined (arg) {
  return arg !== undefined && arg !== null
}

module.exports = isDefined

View File

@ -5,7 +5,6 @@ const mapSeries = require('p-each-series')
const path = require('path')
const rmrf = require('rimraf')
const OrbitDB = require('../src/OrbitDB')
// Include test utilities
const {
config,
@ -55,9 +54,9 @@ Object.keys(testAPIs).forEach(API => {
await stopIpfs(ipfsd2)
})
beforeEach(() => {
orbitdb1 = new OrbitDB(ipfs1, './orbitdb/1')
orbitdb2 = new OrbitDB(ipfs2, './orbitdb/2')
beforeEach(async () => {
orbitdb1 = await OrbitDB.createInstance(ipfs1, { directory: './orbitdb/1' })
orbitdb2 = await OrbitDB.createInstance(ipfs2, { directory: './orbitdb/2' })
})
afterEach(async () => {
@ -100,11 +99,13 @@ Object.keys(testAPIs).forEach(API => {
it('syncs counters', async () => {
let options = {
// Set write access for both clients
write: [
orbitdb1.key.getPublic('hex'),
orbitdb2.key.getPublic('hex')
],
accessController: {
// Set write access for both clients
write: [
orbitdb1.identity.publicKey,
orbitdb2.identity.publicKey
],
}
}
const numbers = [[13, 10], [2, 5]]

View File

@ -9,7 +9,7 @@ const levelup = require('levelup')
const leveldown = require('leveldown')
const OrbitDB = require('../src/OrbitDB')
const OrbitDBAddress = require('../src/orbit-db-address')
const io = require('orbit-db-io')
// Include test utilities
const {
config,
@ -34,11 +34,11 @@ Object.keys(testAPIs).forEach(API => {
rmrf.sync(dbPath)
ipfsd = await startIpfs(API, config.daemon1)
ipfs = ipfsd.api
orbitdb = new OrbitDB(ipfs, dbPath)
orbitdb = await OrbitDB.createInstance(ipfs, { directory: dbPath })
})
after(async () => {
if(orbitdb)
if(orbitdb)
await orbitdb.stop()
if (ipfsd)
@ -104,8 +104,8 @@ Object.keys(testAPIs).forEach(API => {
it('database has the correct address', async () => {
assert.equal(db.address.toString().indexOf('/orbitdb'), 0)
assert.equal(db.address.toString().indexOf('Qm'), 9)
assert.equal(db.address.toString().indexOf('second'), 56)
assert.equal(db.address.toString().indexOf('zd'), 9)
assert.equal(db.address.toString().indexOf('second'), 59)
})
it('saves the database locally', async () => {
@ -132,8 +132,7 @@ Object.keys(testAPIs).forEach(API => {
})
it('saves database manifest file locally', async () => {
const dag = await ipfs.object.get(db.address.root)
const manifest = JSON.parse(dag.toJSON().data)
const manifest = await io.read(ipfs, db.address.root)
assert.notEqual(manifest, )
assert.equal(manifest.name, 'second')
assert.equal(manifest.type, 'feed')
@ -165,22 +164,21 @@ Object.keys(testAPIs).forEach(API => {
it('creates an access controller and adds ourselves as writer by default', async () => {
db = await orbitdb.create('fourth', 'feed')
assert.deepEqual(db.access.write, [orbitdb.key.getPublic('hex')])
assert.deepEqual(db.access.write, [orbitdb.identity.publicKey])
})
it('creates an access controller and adds writers', async () => {
db = await orbitdb.create('fourth', 'feed', { write: ['another-key', 'yet-another-key', orbitdb.key.getPublic('hex')] })
assert.deepEqual(db.access.write, ['another-key', 'yet-another-key', orbitdb.key.getPublic('hex')])
})
it('creates an access controller and doesn\'t add an admin', async () => {
db = await orbitdb.create('sixth', 'feed')
assert.deepEqual(db.access.admin, [])
db = await orbitdb.create('fourth', 'feed', {
accessController: {
write: ['another-key', 'yet-another-key', orbitdb.identity.publicKey]
}
})
assert.deepEqual(db.access.write, ['another-key', 'yet-another-key', orbitdb.identity.publicKey])
})
it('creates an access controller and doesn\'t add read access keys', async () => {
db = await orbitdb.create('seventh', 'feed', { read: ['one', 'two'] })
assert.deepEqual(db.access.read, [])
assert.deepEqual(db.access.write, [orbitdb.identity.publicKey])
})
})
})
@ -222,7 +220,7 @@ Object.keys(testAPIs).forEach(API => {
it('returns the address that would have been created', async () => {
db = await orbitdb.create('third', 'feed', { replicate: false })
assert.equal(address.toString().indexOf('/orbitdb'), 0)
assert.equal(address.toString().indexOf('Qm'), 9)
assert.equal(address.toString().indexOf('zd'), 9)
assert.equal(address.toString(), db.address.toString())
})
})
@ -256,21 +254,21 @@ Object.keys(testAPIs).forEach(API => {
it('opens a database - name only', async () => {
db = await orbitdb.open('abc', { create: true, type: 'feed', overwrite: true })
assert.equal(db.address.toString().indexOf('/orbitdb'), 0)
assert.equal(db.address.toString().indexOf('Qm'), 9)
assert.equal(db.address.toString().indexOf('abc'), 56)
assert.equal(db.address.toString().indexOf('zd'), 9)
assert.equal(db.address.toString().indexOf('abc'), 59)
})
it('opens the same database - from an address', async () => {
db = await orbitdb.open(db.address)
assert.equal(db.address.toString().indexOf('/orbitdb'), 0)
assert.equal(db.address.toString().indexOf('Qm'), 9)
assert.equal(db.address.toString().indexOf('abc'), 56)
assert.equal(db.address.toString().indexOf('zd'), 9)
assert.equal(db.address.toString().indexOf('abc'), 59)
})
it('opens a database and adds the creator as the only writer', async () => {
db = await orbitdb.open('abc', { create: true, type: 'feed', overwrite: true, write: [] })
db = await orbitdb.open('abc', { create: true, type: 'feed', overwrite: true })
assert.equal(db.access.write.length, 1)
assert.equal(db.access.write[0], db.key.getPublic('hex'))
assert.equal(db.access.write[0], db.identity.publicKey)
})
it('doesn\'t open a database if we don\'t have it locally', async () => {
@ -298,6 +296,7 @@ Object.keys(testAPIs).forEach(API => {
await db.add('hello2')
db = await orbitdb.open(db.address)
await db.load()
const res = db.iterator({ limit: -1 }).collect()
@ -307,5 +306,4 @@ Object.keys(testAPIs).forEach(API => {
})
})
})
})

View File

@ -39,7 +39,7 @@ Object.keys(testAPIs).forEach(API => {
rmrf.sync(dbPath)
ipfsd = await startIpfs(API, config.daemon1)
ipfs = ipfsd.api
orbitdb = new OrbitDB(ipfs, dbPath)
orbitdb = await OrbitDB.createInstance(ipfs, { directory: dbPath })
})
after(async () => {

View File

@ -2,6 +2,7 @@
const assert = require('assert')
const rmrf = require('rimraf')
const path = require('path')
const OrbitDB = require('../src/OrbitDB')
const CustomCache = require('orbit-db-cache')
// Include test utilities
@ -29,7 +30,8 @@ Object.keys(testAPIs).forEach(API => {
rmrf.sync(dbPath)
ipfsd = await startIpfs(API, config.daemon1)
ipfs = ipfsd.api
orbitdb1 = new OrbitDB(ipfs, dbPath + '/1', {
orbitdb1 = await OrbitDB.createInstance(ipfs, {
directory: path.join(dbPath, '1'),
cache: CustomTestCache
})
})

View File

@ -2,6 +2,7 @@
const assert = require('assert')
const rmrf = require('rimraf')
const path = require('path')
const OrbitDB = require('../src/OrbitDB')
// Include test utilities
const {
@ -28,8 +29,9 @@ Object.keys(testAPIs).forEach(API => {
rmrf.sync(dbPath)
ipfsd = await startIpfs(API, config.daemon1)
ipfs = ipfsd.api
orbitdb1 = new OrbitDB(ipfs, dbPath + '/1', {
keystore: CustomTestKeystore
orbitdb1 = await OrbitDB.createInstance(ipfs, {
directory: path.join(dbPath, '1'),
keystore: CustomTestKeystore().create()
})
})
@ -58,10 +60,10 @@ Object.keys(testAPIs).forEach(API => {
databases.forEach(async (database) => {
it(database.type + ' allows custom keystore', async () => {
const options = {
// Set write access for both clients
write: [
orbitdb1.key.getPublic('hex')
],
accessController: {
// Set write access for both clients
write: [orbitdb1.identity.publicKey],
}
}
const db1 = await database.create(orbitdb1, 'custom-keystore', options)

View File

@ -2,6 +2,7 @@
const assert = require('assert')
const rmrf = require('rimraf')
const path = require('path')
const OrbitDB = require('../src/OrbitDB')
// Include test utilities
@ -26,14 +27,14 @@ Object.keys(testAPIs).forEach(API => {
rmrf.sync(config.daemon1.repo)
ipfsd = await startIpfs(API, config.daemon1)
ipfs = ipfsd.api
orbitdb1 = new OrbitDB(ipfs, dbPath + '/1')
orbitdb1 = await OrbitDB.createInstance(ipfs, { directory: path.join(dbPath, '1') })
})
after(async () => {
if(orbitdb1)
if(orbitdb1)
await orbitdb1.stop()
if (ipfsd)
if (ipfsd)
await stopIpfs(ipfsd)
})
@ -141,11 +142,11 @@ Object.keys(testAPIs).forEach(API => {
const doc2 = { _id: 'sup world', doc: 'some of the things', views: 10}
const expectedOperation = {
op: 'PUT',
key: 'sup world',
value: {
_id: 'sup world',
doc: 'some of the things',
op: 'PUT',
key: 'sup world',
value: {
_id: 'sup world',
doc: 'some of the things',
views: 10
},
}
@ -160,7 +161,7 @@ Object.keys(testAPIs).forEach(API => {
assert.deepEqual(res.payload, expectedOperation)
assert.notEqual(res.next, undefined)
assert.equal(res.next.length, 1)
assert.equal(res.v, 0)
assert.equal(res.v, 1)
assert.notEqual(res.clock, undefined)
assert.equal(res.clock.time, 2)
assert.notEqual(res.key, undefined)
@ -170,10 +171,10 @@ Object.keys(testAPIs).forEach(API => {
describe('Specified index', function() {
beforeEach(async () => {
const options = {
indexBy: 'doc',
replicate: false,
maxHistory: 0
const options = {
indexBy: 'doc',
replicate: false,
maxHistory: 0
}
db = await orbitdb1.docstore(config.dbname, options)
})

View File

@ -30,11 +30,11 @@ Object.keys(testAPIs).forEach(API => {
rmrf.sync(dbPath)
ipfsd = await startIpfs(API, config.daemon1)
ipfs = ipfsd.api
orbitdb = new OrbitDB(ipfs, dbPath)
orbitdb = await OrbitDB.createInstance(ipfs, { directory: dbPath })
})
after(async () => {
if(orbitdb)
if(orbitdb)
await orbitdb.stop()
if (ipfsd)
@ -53,7 +53,7 @@ Object.keys(testAPIs).forEach(API => {
it('removes local database files', async () => {
await db.drop()
assert.equal(fs.existsSync(localDataPath), false)
})
})
})
})
})

View File

@ -3,6 +3,7 @@
const assert = require('assert')
const mapSeries = require('p-map-series')
const rmrf = require('rimraf')
const path = require('path')
const OrbitDB = require('../src/OrbitDB')
// Include test utilities
@ -30,11 +31,11 @@ Object.keys(testAPIs).forEach(API => {
rmrf.sync(dbPath)
ipfsd = await startIpfs(API, config.daemon1)
ipfs = ipfsd.api
orbitdb1 = new OrbitDB(ipfs, dbPath + '/1')
orbitdb1 = await OrbitDB.createInstance(ipfs, { directory: path.join(dbPath, '1') })
})
after(async () => {
if(orbitdb1)
if(orbitdb1)
await orbitdb1.stop()
if (ipfsd)
@ -87,12 +88,12 @@ Object.keys(testAPIs).forEach(API => {
it('adds an item that is > 256 bytes', async () => {
db = await orbitdb1.eventlog('third database')
let msg = new Buffer(1024)
let msg = Buffer.alloc(1024)
msg.fill('a')
const hash = await db.add(msg.toString())
assert.notEqual(hash, null)
assert.equal(hash.startsWith('Qm'), true)
assert.equal(hash.length, 46)
assert.equal(hash.startsWith('zd'), true)
assert.equal(hash.length, 49)
})
})
@ -118,7 +119,7 @@ Object.keys(testAPIs).forEach(API => {
const iter = db.iterator()
const next = iter.next().value
assert.notEqual(next, null)
assert.equal(next.hash.startsWith('Qm'), true)
assert.equal(next.hash.startsWith('zd'), true)
assert.equal(next.payload.key, null)
assert.equal(next.payload.value, 'hello4')
})

View File

@ -3,6 +3,7 @@
const assert = require('assert')
const mapSeries = require('p-map-series')
const rmrf = require('rimraf')
const path = require('path')
const OrbitDB = require('../src/OrbitDB')
// Include test utilities
@ -30,11 +31,11 @@ Object.keys(testAPIs).forEach(API => {
rmrf.sync(dbPath)
ipfsd = await startIpfs(API, config.daemon1)
ipfs = ipfsd.api
orbitdb1 = new OrbitDB(ipfs, dbPath + '/1')
orbitdb1 = await OrbitDB.createInstance(ipfs, { directory: path.join(dbPath, '1') })
})
after(async () => {
if(orbitdb1)
if(orbitdb1)
await orbitdb1.stop()
if (ipfsd)
@ -61,20 +62,20 @@ Object.keys(testAPIs).forEach(API => {
const hash = await db.add('hello1')
const items = db.iterator({ limit: -1 }).collect()
assert.notEqual(hash, null)
assert.equal(hash, last(items).hash)
assert.equal(hash, last(items).cid)
assert.equal(items.length, 1)
})
it('returns the added entry\'s hash, 2 entries', async () => {
db = await orbitdb1.feed(address)
await db.load()
const prevHash = db.iterator().collect()[0].hash
const prevHash = db.iterator().collect()[0].cid
const hash = await db.add('hello2')
const items = db.iterator({ limit: -1 }).collect()
assert.equal(items.length, 2)
assert.notEqual(hash, null)
assert.notEqual(hash, prevHash)
assert.equal(hash, last(items).hash)
assert.equal(hash, last(items).cid)
})
it('adds five items', async () => {
@ -88,12 +89,12 @@ Object.keys(testAPIs).forEach(API => {
it('adds an item that is > 256 bytes', async () => {
db = await orbitdb1.feed('third')
let msg = new Buffer(1024)
let msg = Buffer.alloc(1024)
msg.fill('a')
const hash = await db.add(msg.toString())
assert.notEqual(hash, null)
assert.equal(hash.startsWith('Qm'), true)
assert.equal(hash.length, 46)
assert.equal(hash.startsWith('zd'), true)
assert.equal(hash.length, 49)
})
it('deletes an item when only one item in the database', async () => {
@ -101,7 +102,7 @@ Object.keys(testAPIs).forEach(API => {
const hash = await db.add('hello3')
const delopHash = await db.remove(hash)
const items = db.iterator().collect()
assert.equal(delopHash.startsWith('Qm'), true)
assert.equal(delopHash.startsWith('zd'), true)
assert.equal(items.length, 0)
})
@ -129,7 +130,7 @@ Object.keys(testAPIs).forEach(API => {
const firstItem = items[0]
const secondItem = items[1]
assert.equal(firstItem.hash.startsWith('Qm'), true)
assert.equal(firstItem.cid.startsWith('zd'), true)
assert.equal(firstItem.payload.key, null)
assert.equal(firstItem.payload.value, 'hello2')
assert.equal(secondItem.payload.value, 'hello3')
@ -158,7 +159,7 @@ Object.keys(testAPIs).forEach(API => {
const iter = db.iterator()
const next = iter.next().value
assert.notEqual(next, null)
assert.equal(next.hash.startsWith('Qm'), true)
assert.equal(next.cid.startsWith('zd'), true)
assert.equal(next.payload.key, null)
assert.equal(next.payload.value, 'hello4')
})
@ -177,7 +178,7 @@ Object.keys(testAPIs).forEach(API => {
const iter = db.iterator()
const first = iter.next().value
const second = iter.next().value
assert.equal(first.hash, hashes[hashes.length - 1])
assert.equal(first.cid, hashes[hashes.length - 1])
assert.equal(second, null)
assert.equal(first.payload.value, 'hello4')
})
@ -217,7 +218,7 @@ Object.keys(testAPIs).forEach(API => {
const iter = db.iterator({ limit: 0 })
const first = iter.next().value
const second = iter.next().value
assert.equal(first.hash, last(hashes))
assert.equal(first.cid, last(hashes))
assert.equal(second, null)
})
@ -225,7 +226,7 @@ Object.keys(testAPIs).forEach(API => {
const iter = db.iterator({ limit: 1 })
const first = iter.next().value
const second = iter.next().value
assert.equal(first.hash, last(hashes))
assert.equal(first.cid, last(hashes))
assert.equal(second, null)
})
@ -235,16 +236,16 @@ Object.keys(testAPIs).forEach(API => {
const second = iter.next().value
const third = iter.next().value
const fourth = iter.next().value
assert.equal(first.hash, hashes[hashes.length - 3])
assert.equal(second.hash, hashes[hashes.length - 2])
assert.equal(third.hash, hashes[hashes.length - 1])
assert.equal(first.cid, hashes[hashes.length - 3])
assert.equal(second.cid, hashes[hashes.length - 2])
assert.equal(third.cid, hashes[hashes.length - 1])
assert.equal(fourth, null)
})
it('returns all items', () => {
const messages = db.iterator({ limit: -1 })
.collect()
.map((e) => e.hash)
.map((e) => e.cid)
messages.reverse()
assert.equal(messages.length, hashes.length)
@ -254,7 +255,7 @@ Object.keys(testAPIs).forEach(API => {
it('returns all items when limit is bigger than -1', () => {
const messages = db.iterator({ limit: -300 })
.collect()
.map((e) => e.hash)
.map((e) => e.cid)
assert.equal(messages.length, hashes.length)
assert.equal(messages[0], hashes[0])
@ -263,7 +264,7 @@ Object.keys(testAPIs).forEach(API => {
it('returns all items when limit is bigger than number of items', () => {
const messages = db.iterator({ limit: 300 })
.collect()
.map((e) => e.hash)
.map((e) => e.cid)
assert.equal(messages.length, hashes.length)
assert.equal(messages[0], hashes[0])
@ -275,7 +276,7 @@ Object.keys(testAPIs).forEach(API => {
it('returns 1 item when gte is the head', () => {
const messages = db.iterator({ gte: last(hashes), limit: -1 })
.collect()
.map((e) => e.hash)
.map((e) => e.cid)
assert.equal(messages.length, 1)
assert.equal(messages[0], last(hashes))
@ -290,7 +291,7 @@ Object.keys(testAPIs).forEach(API => {
const gte = hashes[hashes.length - 2]
const messages = db.iterator({ gte: gte, limit: -1 })
.collect()
.map((e) => e.hash)
.map((e) => e.cid)
assert.equal(messages.length, 2)
assert.equal(messages[0], hashes[hashes.length - 2])
@ -300,7 +301,7 @@ Object.keys(testAPIs).forEach(API => {
it('returns all items when gte is the root item', () => {
const messages = db.iterator({ gte: hashes[0], limit: -1 })
.collect()
.map((e) => e.hash)
.map((e) => e.cid)
assert.equal(messages.length, hashes.length)
assert.equal(messages[0], hashes[0])
@ -310,7 +311,7 @@ Object.keys(testAPIs).forEach(API => {
it('returns items when gt is the root item', () => {
const messages = db.iterator({ gt: hashes[0], limit: -1 })
.collect()
.map((e) => e.hash)
.map((e) => e.cid)
assert.equal(messages.length, itemCount - 1)
assert.equal(messages[0], hashes[1])
@ -320,13 +321,13 @@ Object.keys(testAPIs).forEach(API => {
it('returns items when gt is defined', () => {
const messages = db.iterator({ limit: -1})
.collect()
.map((e) => e.hash)
.map((e) => e.cid)
const gt = messages[2]
const messages2 = db.iterator({ gt: gt, limit: 100 })
.collect()
.map((e) => e.hash)
.map((e) => e.cid)
assert.equal(messages2.length, 2)
assert.equal(messages2[0], messages[messages.length - 2])
@ -338,7 +339,7 @@ Object.keys(testAPIs).forEach(API => {
it('returns one item after head when lt is the head', () => {
const messages = db.iterator({ lt: last(hashes) })
.collect()
.map((e) => e.hash)
.map((e) => e.cid)
assert.equal(messages.length, 1)
assert.equal(messages[0], hashes[hashes.length - 2])
@ -347,7 +348,7 @@ Object.keys(testAPIs).forEach(API => {
it('returns all items when lt is head and limit is -1', () => {
const messages = db.iterator({ lt: last(hashes), limit: -1 })
.collect()
.map((e) => e.hash)
.map((e) => e.cid)
assert.equal(messages.length, hashes.length - 1)
assert.equal(messages[0], hashes[0])
@ -357,7 +358,7 @@ Object.keys(testAPIs).forEach(API => {
it('returns 3 items when lt is head and limit is 3', () => {
const messages = db.iterator({ lt: last(hashes), limit: 3 })
.collect()
.map((e) => e.hash)
.map((e) => e.cid)
assert.equal(messages.length, 3)
assert.equal(messages[0], hashes[hashes.length - 4])
@ -372,7 +373,7 @@ Object.keys(testAPIs).forEach(API => {
it('returns one item when lte is the root item', () => {
const messages = db.iterator({ lte: hashes[0] })
.collect()
.map((e) => e.hash)
.map((e) => e.cid)
assert.equal(messages.length, 1)
assert.equal(messages[0], hashes[0])
@ -381,7 +382,7 @@ Object.keys(testAPIs).forEach(API => {
it('returns all items when lte is the head', () => {
const messages = db.iterator({ lte: last(hashes), limit: -1 })
.collect()
.map((e) => e.hash)
.map((e) => e.cid)
assert.equal(messages.length, itemCount)
assert.equal(messages[0], hashes[0])
@ -391,7 +392,7 @@ Object.keys(testAPIs).forEach(API => {
it('returns 3 items when lte is the head', () => {
const messages = db.iterator({ lte: last(hashes), limit: 3 })
.collect()
.map((e) => e.hash)
.map((e) => e.cid)
assert.equal(messages.length, 3)
assert.equal(messages[0], hashes[hashes.length - 3])

View File

@ -0,0 +1,2 @@
Û{"hash":null,"id":"/orbitdb/QmWDUfC4zcWJGgc9UHn1X3qQ5KZqBv4KCiCtjnpMmBT8JC/v0-db","payload":{"op":"ADD","key":null,"value":{"thing":"1"}},"next":[],"v":0,"clock":{"id":"04b54f6ef529cd2dd2f9c6897a382c492222d42e57826269a38101ffe752aa07260ecd092a970d7eef08c4ddae2b7006ee25f07e4ab62fa5262ae3b51fdea29f78","time":1},"sig":"30460221008067ac541ab9b8ef6de41318220c6927f046188ae63620c34666d1ca00574001022100b9427217bb79b4bdad8645bb361745ffffa0bfa264778202c68aa8f2b9857ada","key":"04b54f6ef529cd2dd2f9c6897a382c492222d42e57826269a38101ffe752aa07260ecd092a970d7eef08c4ddae2b7006ee25f07e4ab62fa5262ae3b51fdea29f78"}

View File

@ -0,0 +1,2 @@
h{"name":"v0-db","type":"feed","accessController":"/ipfs/Qmc3S7aMSmH8oGmx7Zdp8UxVWcDyCq5o2H9qYFgT3GW6nM"}

View File

@ -0,0 +1,8 @@
º{
"admin": [],
"write": [
"04b54f6ef529cd2dd2f9c6897a382c492222d42e57826269a38101ffe752aa07260ecd092a970d7eef08c4ddae2b7006ee25f07e4ab62fa5262ae3b51fdea29f78"
],
"read": []
}

View File

@ -0,0 +1,2 @@
{"hash":null,"id":"/orbitdb/QmWDUfC4zcWJGgc9UHn1X3qQ5KZqBv4KCiCtjnpMmBT8JC/v0-db","payload":{"op":"ADD","key":null,"value":{"thing":"2"}},"next":["QmPoEJkWCkgDkuNdshm6Srw9haEBtgn1e352dkF1wpEfXt"],"v":0,"clock":{"id":"04b54f6ef529cd2dd2f9c6897a382c492222d42e57826269a38101ffe752aa07260ecd092a970d7eef08c4ddae2b7006ee25f07e4ab62fa5262ae3b51fdea29f78","time":2},"sig":"30460221008067ac541ab9b8ef6de41318220c6927f046188ae63620c34666d1ca00574001022100b9427217bb79b4bdad8645bb361745ffffa0bfa264778202c68aa8f2b9857ada","key":"04b54f6ef529cd2dd2f9c6897a382c492222d42e57826269a38101ffe752aa07260ecd092a970d7eef08c4ddae2b7006ee25f07e4ab62fa5262ae3b51fdea29f78"}

View File

@ -0,0 +1,2 @@
¼{"hash":null,"id":"/orbitdb/QmWDUfC4zcWJGgc9UHn1X3qQ5KZqBv4KCiCtjnpMmBT8JC/v0-db","payload":{"op":"ADD","key":null,"value":{"thing":"3"}},"next":["QmZvMXmv66vXQ9u2q8UTWPmH59eQUKzVb24bKv8j9zbVuN","QmPoEJkWCkgDkuNdshm6Srw9haEBtgn1e352dkF1wpEfXt"],"v":0,"clock":{"id":"04b54f6ef529cd2dd2f9c6897a382c492222d42e57826269a38101ffe752aa07260ecd092a970d7eef08c4ddae2b7006ee25f07e4ab62fa5262ae3b51fdea29f78","time":3},"sig":"30460221008067ac541ab9b8ef6de41318220c6927f046188ae63620c34666d1ca00574001022100b9427217bb79b4bdad8645bb361745ffffa0bfa264778202c68aa8f2b9857ada","key":"04b54f6ef529cd2dd2f9c6897a382c492222d42e57826269a38101ffe752aa07260ecd092a970d7eef08c4ddae2b7006ee25f07e4ab62fa5262ae3b51fdea29f78"}

BIN
test/fixtures/ipfs/datastore/000183.ldb vendored Normal file

Binary file not shown.

BIN
test/fixtures/ipfs/datastore/000187.ldb vendored Normal file

Binary file not shown.

BIN
test/fixtures/ipfs/datastore/000192.ldb vendored Normal file

Binary file not shown.

BIN
test/fixtures/ipfs/datastore/000197.ldb vendored Normal file

Binary file not shown.

1
test/fixtures/ipfs/datastore/CURRENT vendored Normal file
View File

@ -0,0 +1 @@
MANIFEST-000200

Binary file not shown.

View File

@ -0,0 +1 @@
{"publicKey":"04b54f6ef529cd2dd2f9c6897a382c492222d42e57826269a38101ffe752aa07260ecd092a970d7eef08c4ddae2b7006ee25f07e4ab62fa5262ae3b51fdea29f78","privateKey":"31cbd0193567605dba4cd27a0fe4de82151be65aac16b58f2337ec2b7ad41b38"}

View File

@ -0,0 +1 @@
MANIFEST-000371

View File

@ -2,6 +2,7 @@
const assert = require('assert')
const rmrf = require('rimraf')
const path = require('path')
const OrbitDB = require('../src/OrbitDB')
// Include test utilities
@ -27,11 +28,11 @@ Object.keys(testAPIs).forEach(API => {
rmrf.sync(dbPath)
ipfsd = await startIpfs(API, config.daemon1)
ipfs = ipfsd.api
orbitdb1 = new OrbitDB(ipfs, dbPath + '/1')
orbitdb1 = await OrbitDB.createInstance(ipfs, { directory: path.join(dbPath, '1') })
})
after(async () => {
if(orbitdb1)
if(orbitdb1)
await orbitdb1.stop()
if (ipfsd)

View File

@ -80,15 +80,15 @@ Object.keys(testAPIs).forEach(API => {
ipfs2 = ipfsd2.api
// Connect the peers manually to speed up test times
await connectPeers(ipfs1, ipfs2)
orbitdb1 = new OrbitDB(ipfs1, dbPath1)
orbitdb2 = new OrbitDB(ipfs2, dbPath2)
orbitdb1 = await OrbitDB.createInstance(ipfs1, { directory: dbPath1 })
orbitdb2 = await OrbitDB.createInstance(ipfs2, { directory: dbPath2 })
})
after(async () => {
if(orbitdb1)
if(orbitdb1)
await orbitdb1.stop()
if(orbitdb2)
if(orbitdb2)
await orbitdb2.stop()
if (ipfsd1)
@ -102,8 +102,8 @@ Object.keys(testAPIs).forEach(API => {
let options = {}
// Set write access for both clients
options.write = [
orbitdb1.key.getPublic('hex'),
orbitdb2.key.getPublic('hex')
orbitdb1.identity.publicKey,
orbitdb2.identity.publicKey
],
console.log("Creating databases and waiting for peers to connect")
@ -117,7 +117,7 @@ Object.keys(testAPIs).forEach(API => {
localDatabases.push(db)
}
// Open the databases on the second node, set 'sync' flag so that
// Open the databases on the second node, set 'sync' flag so that
// the second peer fetches the db manifest from the network
options = Object.assign({}, options, { sync: true })
for (let [index, dbInterface] of databaseInterfaces.entries()) {
@ -149,7 +149,7 @@ Object.keys(testAPIs).forEach(API => {
for (let i = 1; i < entryCount + 1; i ++)
entryArr.push(i)
// Result state,
// Result state,
// we count how many times 'replicated' event was fired per db
let replicated = {}
localDatabases.forEach(db => {
@ -165,10 +165,11 @@ Object.keys(testAPIs).forEach(API => {
// Write entries to each database
console.log("Writing to databases")
databaseInterfaces.forEach((dbInterface, index) => {
for (let index = 0; index < databaseInterfaces.length; index++) {
const dbInterface = databaseInterfaces[index]
const db = localDatabases[index]
mapSeries(entryArr, val => dbInterface.write(db, val))
})
await mapSeries(entryArr, val => dbInterface.write(db, val))
}
// Function to check if all databases have been replicated,
// we calculate this by checking number of 'replicated' events fired

View File

@ -1,265 +0,0 @@
'use strict'
const fs = require('fs')
const rmrf = require('rimraf')
const path = require('path')
const assert = require('assert')
const pMap = require('p-map')
const pEachSeries = require('p-each-series')
const pWhilst = require('p-whilst')
const OrbitDB = require('../src/OrbitDB')
const startIpfs = require('./utils/start-ipfs')
// Settings for the test ipfs daemons
const config = require('./utils/config.js')
describe.skip('OrbitDB - Network Stress Tests', function() {
// We need a huge timeout since we're running
// very long-running tests (takes minutes)
this.timeout(1000 * 60 * 60) // 1 hour
const tests = [
{
description: '1 update - 2 peers - as fast as possible',
updates: 1,
maxInterval: -1,
minInterval: 0,
sequential: false,
content: 'Hello #',
clients: [
{ name: 'daemon1' },
{ name: 'daemon2' },
// { name: 'daemon3' },
// { name: 'daemon4' },
// { name: 'daemon5' },
// { name: 'daemon6' },
// Don't go beyond 6...
// { name: 'daemon7' },
// { name: 'daemon8' },
],
},
{
description: '32 update - concurrent - 2 peers - random interval',
updates: 32,
maxInterval: 2000,
minInterval: 10,
sequential: false,
content: 'Hello random! ',
clients: [
{ name: 'daemon1' },
{ name: 'daemon2' },
],
},
{
description: '1000 update concurrently - 2 peers - as fast as possible',
updates: 1000,
maxInterval: -1,
minInterval: 0,
sequential: false,
content: 'Hello #',
clients: [
{ name: 'daemon1' },
{ name: 'daemon2' },
],
},
{
description: '200 update as Buffers sequentially - 2 peers - as fast as possible',
updates: 200,
maxInterval: -1,
minInterval: 0,
sequential: true,
content: Buffer.from('👻'),
clients: [
{ name: 'daemon1' },
{ name: 'daemon2' },
],
},
{
description: '50 update over a period long time - 6 peers - slow, random write intervals',
updates: 50,
maxInterval: 3000,
minInterval: 1000,
sequential: false,
content: 'Terve! ',
clients: [
{ name: 'daemon1' },
{ name: 'daemon2' },
{ name: 'daemon3' },
{ name: 'daemon4' },
{ name: 'daemon5' },
{ name: 'daemon6' },
],
},
{
description: '50 update over a period long time - 8 peers - slow, random write intervals',
updates: 100,
maxInterval: 3000,
minInterval: 1000,
sequential: false,
content: 'Terve! ',
clients: [
{ name: 'daemon1' },
{ name: 'daemon2' },
{ name: 'daemon3' },
{ name: 'daemon4' },
{ name: 'daemon5' },
{ name: 'daemon6' },
{ name: 'daemon7' },
{ name: 'daemon8' },
],
},
]
const rootPath = './orbitdb/network-tests/'
const channelName = 'orbitdb-network-stress-tests'
tests.forEach(test => {
it(test.description, (done) => {
const updateCount = test.updates
const maxInterval = test.maxInterval || -1
const minInterval = test.minInterval || 0
const sequential = test.sequential
const clientData = test.clients
rmrf.sync(rootPath)
// Create IPFS instances
const createIpfsInstance = (c) => {
const repoPath = path.join(rootPath, c.name, '/ipfs' + new Date().getTime())
console.log("Starting IPFS instance <<>>", repoPath)
return startIpfs(Object.assign({}, config.defaultIpfsConfig, {
repo: repoPath,
start: true,
}))
}
const createOrbitDB = async (databaseConfig, ipfs) => {
const orbitdb = new OrbitDB(ipfs, path.join('./orbitdb/network-tests/', databaseConfig.name))
const db = await orbitdb.eventlog(databaseConfig.address, {
write: ['*']
})
return db
}
let allTasks = []
const setupAllTasks = (databases) => {
// Create the payloads
let texts = []
for (let i = 1; i < updateCount + 1; i ++) {
texts.push(test.content + i)
}
const setupUpdates = (client) => texts.reduce((res, acc) => {
return res.concat([{ db: client, content: acc }])
}, [])
allTasks = databases.map(db => {
return {
name: db.id,
tasks: setupUpdates(db),
}
})
}
const runAllTasks = () => {
if (sequential) {
return pEachSeries(allTasks, e => pEachSeries(e.tasks, writeToDB))
.then(() => console.log())
} else {
return pMap(allTasks, e => pEachSeries(e.tasks, writeToDB))
.then(() => console.log())
}
}
let i = 0
const writeToDB = (task) => {
return new Promise((resolve, reject) => {
if (maxInterval === -1) {
task.db.add(task.content)
.then(() => process.stdout.write(`\rUpdates (${databases.length} peers): ${Math.floor(++i)} / ${updateCount}`))
.then(resolve)
.catch(reject)
} else {
setTimeout(() => {
task.db.add(task.content)
.then(() => process.stdout.write(`\rUpdates (${databases.length} peers): ${Math.floor(++i)} / ${updateCount}`))
.then(resolve)
.catch(reject)
}, Math.floor(Math.random() * maxInterval) + minInterval)
}
})
}
const waitForAllTasks = (address) => {
let msgCount = 0
return pWhilst(
() => msgCount < databases.length * databases.length * updateCount,
() => new Promise(resolve => {
return queryDatabases(address)
.then(res => {
msgCount = res.reduce((val, acc) => val += acc.length, 0)
})
.then(() => process.stdout.write(`\rUpdated (${databases.length} peers): ` + msgCount.toString() + ' / ' + (updateCount * databases.length * databases.length)))
.then(() => setTimeout(resolve, 100))
})
)
.then(() => process.stdout.write(`\rUpdated (${databases.length} peers): ` + msgCount.toString() + ' / ' + (updateCount * databases.length * databases.length) + '\n'))
}
const queryDatabases = () => {
return pMap(databases, db => db.iterator({ limit: -1 }).collect(), { concurrency: 2 })
}
// All our databases instances
let databases = []
let addr
// Start the test
pMap(clientData, (c, idx) => {
return createIpfsInstance(c)
.then(async (ipfs) => {
let db
if (idx === 0 && !addr) {
c.address = channelName
db = await createOrbitDB(c, ipfs)
addr = db.address.toString()
} else if (addr) {
c.address = addr
db = await createOrbitDB(c, ipfs)
} else {
console.error("Address not defined!")
}
return db
})
}, { concurrency: 1 })
.then((result) => databases = result)
.then(() => setupAllTasks(databases))
.then(() => console.log(`Applying ${updateCount} updates per peer. This will take a while...`))
.then(() => runAllTasks())
.then(() => console.log('Done. Waiting for all updates to reach the peers...'))
.then(() => waitForAllTasks(addr))
.then(() => queryDatabases())
.then((result) => {
// Both databases have the same amount of entries
result.forEach(entries => {
assert.equal(entries.length, updateCount * databases.length)
})
// Both databases have the same entries in the same order
result.reduce((prev, entries) => {
assert.deepEqual(entries, prev)
return entries
}, result[0])
// Success! Cleanup and finish
pEachSeries(databases, db => {
db.close()
db._ipfs.stop()
})
.then(() => done())
})
.catch(done)
})
})
})

View File

@ -27,7 +27,7 @@ Object.keys(testAPIs).forEach(API => {
rmrf.sync(dbPath)
ipfsd = await startIpfs(API, config.daemon1)
ipfs = ipfsd.api
orbitdb = new OrbitDB(ipfs, dbPath)
orbitdb = await OrbitDB.createInstance(ipfs, { directory: dbPath })
})
after(async () => {
@ -50,43 +50,42 @@ Object.keys(testAPIs).forEach(API => {
})
it('parse address successfully', () => {
const address = '/orbitdb/Qmdgwt7w4uBsw8LXduzCd18zfGXeTmBsiR8edQ1hSfzcJC/first-database'
const address = '/orbitdb/zdpuAuK3BHpS7NvMBivynypqciYCuy2UW77XYBPUYRnLjnw13/first-database'
const result = OrbitDB.parseAddress(address)
const isInstanceOf = result instanceof OrbitDBAddress
assert.equal(isInstanceOf, true)
assert.equal(result.root, 'Qmdgwt7w4uBsw8LXduzCd18zfGXeTmBsiR8edQ1hSfzcJC')
assert.equal(result.root, 'zdpuAuK3BHpS7NvMBivynypqciYCuy2UW77XYBPUYRnLjnw13')
assert.equal(result.path, 'first-database')
assert.equal(result.toString().indexOf('/orbitdb'), 0)
assert.equal(result.toString().indexOf('Qm'), 9)
assert.equal(result.toString().indexOf('zd'), 9)
})
})
describe('isValid Address', () => {
it('throws an error if address is empty', () => {
assert.throws(() => {
const result = OrbitDB.isValidAddress('')
})
it('returns false for empty string', () => {
const result = OrbitDB.isValidAddress('')
assert.equal(result, false)
})
it('validate address successfully', () => {
const address = '/orbitdb/Qmdgwt7w4uBsw8LXduzCd18zfGXeTmBsiR8edQ1hSfzcJC/first-database'
const address = '/orbitdb/zdpuAuK3BHpS7NvMBivynypqciYCuy2UW77XYBPUYRnLjnw13/first-database'
const result = OrbitDB.isValidAddress(address)
assert.equal(result, true)
})
it('handle missing orbitdb prefix', () => {
const address = 'Qmdgwt7w4uBsw8LXduzCd18zfGXeTmBsiR8edQ1hSfzcJC/first-database'
const address = 'zdpuAuK3BHpS7NvMBivynypqciYCuy2UW77XYBPUYRnLjnw13/first-database'
const result = OrbitDB.isValidAddress(address)
assert.equal(result, true)
})
it('handle missing db address name', () => {
const address = '/orbitdb/Qmdgwt7w4uBsw8LXduzCd18zfGXeTmBsiR8edQ1hSfzcJC'
const address = '/orbitdb/zdpuAuK3BHpS7NvMBivynypqciYCuy2UW77XYBPUYRnLjnw13'
const result = OrbitDB.isValidAddress(address)
assert.equal(result, true)

View File

@ -3,6 +3,7 @@
const assert = require('assert')
const mapSeries = require('p-map-series')
const rmrf = require('rimraf')
const path = require('path')
const OrbitDB = require('../src/OrbitDB')
// Include test utilities
@ -20,11 +21,11 @@ const ipfsPath = './orbitdb/tests/persistency/ipfs'
const tests = [
{
title: 'Persistency',
orbitDBConfig: {}
orbitDBConfig: { directory: path.join(dbPath, '1') }
},
{
title: 'Persistency with custom cache',
orbitDBConfig: { cache: CustomTestCache }
orbitDBConfig: { directory: path.join(dbPath, '1'), cache: CustomTestCache }
}
]
@ -43,7 +44,7 @@ Object.keys(testAPIs).forEach(API => {
rmrf.sync(dbPath)
ipfsd = await startIpfs(API, config.daemon1)
ipfs = ipfsd.api
orbitdb1 = new OrbitDB(ipfs, dbPath + '/1', test.orbitDBConfig)
orbitdb1 = await OrbitDB.createInstance(ipfs, test.orbitDBConfig)
})
after(async () => {

View File

@ -38,17 +38,17 @@ Object.keys(testAPIs).forEach(API => {
ipfsd2 = await startIpfs(API, config.daemon2)
ipfs1 = ipfsd1.api
ipfs2 = ipfsd2.api
orbitdb1 = new OrbitDB(ipfs1, dbPath1)
orbitdb2 = new OrbitDB(ipfs2, dbPath2)
orbitdb1 = await OrbitDB.createInstance(ipfs1, { directory: dbPath1 })
orbitdb2 = await OrbitDB.createInstance(ipfs2, { directory: dbPath2 })
// Connect the peers manually to speed up test times
await connectPeers(ipfs1, ipfs2)
})
after(async () => {
if(orbitdb1)
if(orbitdb1)
await orbitdb1.stop()
if(orbitdb2)
if(orbitdb2)
await orbitdb2.stop()
if (ipfsd1)
@ -63,8 +63,8 @@ Object.keys(testAPIs).forEach(API => {
const openDatabases1 = async (options) => {
// Set write access for both clients
options.write = [
orbitdb1.key.getPublic('hex'),
orbitdb2.key.getPublic('hex')
orbitdb1.identity.publicKey,
orbitdb2.identity.publicKey
],
options = Object.assign({}, options, { path: dbPath1 })
@ -77,8 +77,8 @@ Object.keys(testAPIs).forEach(API => {
const openDatabases = async (options) => {
// Set write access for both clients
options.write = [
orbitdb1.key.getPublic('hex'),
orbitdb2.key.getPublic('hex')
orbitdb1.identity.publicKey,
orbitdb2.identity.publicKey
],
options = Object.assign({}, options, { path: dbPath1, create: true })
@ -129,10 +129,12 @@ Object.keys(testAPIs).forEach(API => {
// Set write access for both clients
let options = {
write: [
orbitdb1.key.getPublic('hex'),
orbitdb2.key.getPublic('hex')
],
accessController: {
write: [
orbitdb1.identity.publicKey,
orbitdb2.identity.publicKey
],
}
}
// Get the previous address to make sure nothing mutates it

View File

@ -38,8 +38,8 @@ Object.keys(testAPIs).forEach(API => {
ipfsd2 = await startIpfs(API, config.daemon2)
ipfs1 = ipfsd1.api
ipfs2 = ipfsd2.api
orbitdb1 = new OrbitDB(ipfs1, dbPath1)
orbitdb2 = new OrbitDB(ipfs2, dbPath2)
orbitdb1 = await OrbitDB.createInstance(ipfs1, { directory: dbPath1 })
orbitdb2 = await OrbitDB.createInstance(ipfs2, { directory: dbPath2 })
// Connect the peers manually to speed up test times
await connectPeers(ipfs1, ipfs2)
})
@ -62,8 +62,8 @@ Object.keys(testAPIs).forEach(API => {
let options = {}
// Set write access for both clients
options.write = [
orbitdb1.key.getPublic('hex'),
orbitdb2.key.getPublic('hex')
orbitdb1.identity.publicKey,
orbitdb2.identity.publicKey
],
options = Object.assign({}, options, { path: dbPath1 })
@ -137,7 +137,7 @@ Object.keys(testAPIs).forEach(API => {
reject(new Error("Should not receive the 'replicated' event!"))
})
// Can't check this for now as db1 might've sent the heads to db2
// Can't check this for now as db1 might've sent the heads to db2
// before we subscribe to the event
db2.events.on('replicate.progress', (address, hash, entry) => {
try {

View File

@ -1,4 +1,4 @@
'use strict'
'use strict'
const assert = require('assert')
const mapSeries = require('p-each-series')
@ -23,7 +23,7 @@ const ipfsPath2 = './orbitdb/tests/replication/2/ipfs'
Object.keys(testAPIs).forEach(API => {
describe(`orbit-db - Replication (${API})`, function() {
this.timeout(config.timeout)
this.timeout(100000)
let ipfsd1, ipfsd2, ipfs1, ipfs2
let orbitdb1, orbitdb2, db1, db2
@ -45,10 +45,10 @@ Object.keys(testAPIs).forEach(API => {
ipfs2 = ipfsd2.api
// Use memory store for quicker tests
const memstore = new MemStore()
ipfs1.object.put = memstore.put.bind(memstore)
ipfs1.object.get = memstore.get.bind(memstore)
ipfs2.object.put = memstore.put.bind(memstore)
ipfs2.object.get = memstore.get.bind(memstore)
ipfs1.dag.put = memstore.put.bind(memstore)
ipfs1.dag.get = memstore.get.bind(memstore)
ipfs2.dag.put = memstore.put.bind(memstore)
ipfs2.dag.get = memstore.get.bind(memstore)
// Connect the peers manually to speed up test times
await connectPeers(ipfs1, ipfs2)
})
@ -63,43 +63,43 @@ Object.keys(testAPIs).forEach(API => {
beforeEach(async () => {
clearInterval(timer)
orbitdb1 = await OrbitDB.createInstance(ipfs1, { directory: dbPath1 })
orbitdb2 = await OrbitDB.createInstance(ipfs2, { directory: dbPath2 })
orbitdb1 = new OrbitDB(ipfs1, dbPath1)
orbitdb2 = new OrbitDB(ipfs2, dbPath2)
options = {
options = {
// Set write access for both clients
write: [
orbitdb1.key.getPublic('hex'),
orbitdb2.key.getPublic('hex')
],
accessController: {
write: [
orbitdb1.identity.publicKey,
orbitdb2.identity.publicKey
],
}
}
options = Object.assign({}, options, { path: dbPath1 })
options = Object.assign({}, options, { directory: dbPath1 })
db1 = await orbitdb1.eventlog('replication-tests', options)
})
afterEach(async () => {
clearInterval(timer)
options = {}
if (db1)
await db1.drop()
if (db2)
await db2.drop()
if(orbitdb1)
if(orbitdb1)
await orbitdb1.stop()
if(orbitdb2)
if(orbitdb2)
await orbitdb2.stop()
})
it('replicates database of 1 entry', async () => {
// Set 'sync' flag on. It'll prevent creating a new local database and rather
// fetch the database from the network
options = Object.assign({}, options, { path: dbPath2, sync: true })
options = Object.assign({}, options, { directory: dbPath2, sync: true })
db2 = await orbitdb2.eventlog(db1.address.toString(), options)
await waitForPeers(ipfs2, [orbitdb1.id], db1.address.toString())
@ -115,7 +115,7 @@ Object.keys(testAPIs).forEach(API => {
})
it('replicates database of 100 entries', async () => {
options = Object.assign({}, options, { path: dbPath2, sync: true })
options = Object.assign({}, options, { directory: dbPath2, sync: true })
db2 = await orbitdb2.eventlog(db1.address.toString(), options)
await waitForPeers(ipfs2, [orbitdb1.id], db1.address.toString())
@ -147,7 +147,7 @@ Object.keys(testAPIs).forEach(API => {
})
it('emits correct replication info', async () => {
options = Object.assign({}, options, { path: dbPath2, sync: true })
options = Object.assign({}, options, { directory: dbPath2, sync: true })
db2 = await orbitdb2.eventlog(db1.address.toString(), options)
await waitForPeers(ipfs2, [orbitdb1.id], db1.address.toString())
@ -158,18 +158,18 @@ Object.keys(testAPIs).forEach(API => {
db2.events.on('replicate', (address, entry) => {
eventCount['replicate'] ++
events.push({
event: 'replicate',
count: eventCount['replicate'],
events.push({
event: 'replicate',
count: eventCount['replicate'],
entry: entry,
})
})
db2.events.on('replicate.progress', (address, hash, entry, progress, total) => {
eventCount['replicate.progress'] ++
events.push({
event: 'replicate.progress',
count: eventCount['replicate.progress'],
events.push({
event: 'replicate.progress',
count: eventCount['replicate.progress'],
entry: entry ,
replicationInfo: {
max: db2.replicationStatus.max,
@ -180,15 +180,15 @@ Object.keys(testAPIs).forEach(API => {
db2.events.on('replicated', (address) => {
eventCount['replicated'] ++
events.push({
event: 'replicated',
count: eventCount['replicate'],
events.push({
event: 'replicated',
count: eventCount['replicate'],
replicationInfo: {
max: db2.replicationStatus.max,
progress: db2.replicationStatus.progress,
},
})
// Resolve with a little timeout to make sure we
// Resolve with a little timeout to make sure we
// don't receive more than one event
setTimeout(() => {
finished = db2.iterator({ limit: -1 }).collect().length === expectedEventCount
@ -256,18 +256,12 @@ Object.keys(testAPIs).forEach(API => {
}
await mapSeries(adds, add)
console.log()
// Open second instance again
options = {
path: dbPath2,
directory: dbPath2 + '1',
overwrite: true,
sync: true,
// Set write access for both clients
write: [
orbitdb1.key.getPublic('hex'),
orbitdb2.key.getPublic('hex')
],
}
db2 = await orbitdb2.eventlog(db1.address.toString(), options)
@ -275,9 +269,9 @@ Object.keys(testAPIs).forEach(API => {
db2.events.on('replicate', (address, entry) => {
eventCount['replicate'] ++
// console.log("[replicate] ", '#' + eventCount['replicate'] + ':', db2.replicationStatus.progress, '/', db2.replicationStatus.max, '| Tasks (in/queued/running/out):', db2._loader.tasksRequested, '/', db2._loader.tasksQueued, '/', db2._loader.tasksRunning, '/', db2._loader.tasksFinished)
events.push({
event: 'replicate',
count: eventCount['replicate'],
events.push({
event: 'replicate',
count: eventCount['replicate'],
entry: entry,
})
})
@ -286,9 +280,9 @@ Object.keys(testAPIs).forEach(API => {
eventCount['replicate.progress'] ++
// console.log("[progress] ", '#' + eventCount['replicate.progress'] + ':', db2.replicationStatus.progress, '/', db2.replicationStatus.max, '| Tasks (in/queued/running/out):', db2._loader.tasksRequested, '/', db2._loader.tasksQueued, '/', db2._loader.tasksRunning, '/', db2._loader.tasksFinished)
// assert.equal(db2.replicationStatus.progress, eventCount['replicate.progress'])
events.push({
event: 'replicate.progress',
count: eventCount['replicate.progress'],
events.push({
event: 'replicate.progress',
count: eventCount['replicate.progress'],
entry: entry ,
replicationInfo: {
max: db2.replicationStatus.max,
@ -303,27 +297,24 @@ Object.keys(testAPIs).forEach(API => {
try {
// Test the replicator state
assert.equal(db2._loader.tasksRequested >= db2.replicationStatus.progress, true)
assert.equal(db2._loader.tasksQueued <= db2.options.referenceCount, true)
assert.equal(db2.options.referenceCount, 64)
assert.equal(db2._loader.tasksRunning, 0)
assert.equal(db2._loader.tasksFinished, db2.replicationStatus.progress)
} catch (e) {
reject(e)
}
events.push({
event: 'replicated',
count: eventCount['replicate'],
events.push({
event: 'replicated',
count: eventCount['replicate'],
replicationInfo: {
max: db2.replicationStatus.max,
progress: db2.replicationStatus.progress,
},
})
// Resolve with a little timeout to make sure we
// Resolve with a little timeout to make sure we
// don't receive more than one event
setTimeout( async () => {
// console.log(eventCount['replicate.progress'], expectedEventCount)
if (eventCount['replicate.progress'] === expectedEventCount) {
if (eventCount['replicated'] === expectedEventCount) {
finished = true
}
}, 100)
@ -386,25 +377,21 @@ Object.keys(testAPIs).forEach(API => {
// Open second instance again
let options = {
path: dbPath2,
directory: dbPath2,
overwrite: true,
sync: true,
// Set write access for both clients
write: [
orbitdb1.key.getPublic('hex'),
orbitdb2.key.getPublic('hex')
],
}
db2 = await orbitdb2.eventlog(db1.address.toString(), options)
assert.equal(db1.address.toString(), db2.address.toString())
await waitForPeers(ipfs2, [orbitdb1.id], db1.address.toString())
db2.events.on('replicate', (address, entry) => {
eventCount['replicate'] ++
// console.log("[replicate] ", '#' + eventCount['replicate'] + ':', current, '/', total, '| Tasks (in/queued/running/out):', db2._loader.tasksRequested, '/', db2._loader.tasksQueued, '/', db2._loader.tasksRunning, '/', db2._loader.tasksFinished)
events.push({
event: 'replicate',
count: eventCount['replicate'],
events.push({
event: 'replicate',
count: eventCount['replicate'],
entry: entry,
})
})
@ -414,9 +401,9 @@ Object.keys(testAPIs).forEach(API => {
eventCount['replicate.progress'] ++
// console.log("[progress] ", '#' + eventCount['replicate.progress'] + ':', current, '/', total, '| Tasks (in/queued/running/out):', db2._loader.tasksRequested, '/', db2._loader.tasksQueued, '/', db2._loader.tasksRunning, '/', db2._loader.tasksFinished)
// assert.equal(current, total)
events.push({
event: 'replicate.progress',
count: eventCount['replicate.progress'],
events.push({
event: 'replicate.progress',
count: eventCount['replicate.progress'],
entry: entry ,
replicationInfo: {
max: db2.replicationStatus.max,
@ -435,9 +422,9 @@ Object.keys(testAPIs).forEach(API => {
reject(e)
}
events.push({
event: 'replicated',
count: eventCount['replicate'],
events.push({
event: 'replicated',
count: eventCount['replicate'],
replicationInfo: {
max: db2.replicationStatus.max,
progress: db2.replicationStatus.progress,
@ -445,7 +432,7 @@ Object.keys(testAPIs).forEach(API => {
})
if (db2.replicationStatus.max >= expectedEventCount * 2
&& db2.replicationStatus.progress >= expectedEventCount * 2)
&& db2.replicationStatus.progress >= expectedEventCount * 2)
finished = true
})
@ -501,4 +488,4 @@ Object.keys(testAPIs).forEach(API => {
})
})
})
})
})

View File

@ -36,16 +36,16 @@ Object.keys(testAPIs).forEach(API => {
rmrf.sync(dbPath2)
ipfsd = await startIpfs(API, config.daemon1)
ipfs = ipfsd.api
orbitdb1 = new OrbitDB(ipfs, dbPath1)
orbitdb2 = new OrbitDB(ipfs, dbPath2)
orbitdb1 = await OrbitDB.createInstance(ipfs, { directory: dbPath1 })
orbitdb2 = await OrbitDB.createInstance(ipfs, { directory: dbPath2 })
db = await orbitdb1.log('replication status tests')
})
after(async () => {
if(orbitdb1)
if(orbitdb1)
await orbitdb1.stop()
if(orbitdb2)
if(orbitdb2)
await orbitdb2.stop()
if (ipfsd)

View File

@ -25,8 +25,6 @@ module.exports = {
}
},
daemon1: {
repo: './ipfs/orbitdb/tests/daemon1',
start: true,
EXPERIMENTAL: {
pubsub: true
},
@ -49,8 +47,6 @@ module.exports = {
},
},
daemon2: {
repo: './ipfs/orbitdb/tests/daemon2',
start: true,
EXPERIMENTAL: {
pubsub: true
},
@ -71,5 +67,5 @@ module.exports = {
}
},
},
},
}
}

View File

@ -5,49 +5,47 @@ const ec = new EC('secp256k1')
* A custom keystore example
*/
class CustomTestKeystore {
constructor(signer) {
this.createKey();
constructor (storage) {
// Use just one key throughout the keystore
// for mock purposes
this.key = this.createKey()
}
createKey() {
hasKey () {
return this.key !== undefined ? true : false
}
createKey (id) {
const key = ec.genKeyPair()
this.key = ec.keyPair({
pub: key.getPublic('hex'),
const keyPair = {
public: {
marshal: () => key.getPublic('hex')
},
priv: key.getPrivate('hex'),
privEnc: 'hex',
pubEnc: 'hex',
})
}
return keyPair
}
getKey (id) {
return this.key
}
getKey() {
return this.key
sign (key, data) {
return Promise.resolve('<signature>')
}
// TODO: check if this is really in use
generateKey() {
return Promise.resolve(this.createKey())
}
importPublicKey(key) {
return Promise.resolve(ec.keyFromPublic(key, 'hex'))
}
importPrivateKey(key) {
return Promise.resolve(ec.keyFromPrivate(key, 'hex'))
}
sign(key, data) {
const sig = ec.sign(data, key)
return Promise.resolve(sig.toDER('hex'))
}
verify(signature, key, data) {
let res = false
res = ec.verify(data, signature, key)
return Promise.resolve(res)
verify (signature, publicKey, data) {
return Promise.resolve(true)
}
}
module.exports = new CustomTestKeystore()
module.exports = (LocalStorage, mkdir) => {
return {
create: (directory) => {
return new CustomTestKeystore()
}
}
}

View File

@ -1,95 +1,63 @@
'use strict'
const multihashing = require('multihashing-async')
const mh = require('multihashes')
const CID = require('cids')
const pify = require('pify')
const defaultHashAlg = 'sha2-256'
const createMultihash = pify(multihashing)
// 'use strict'
// const ImmutableDB = require('./immutabledb-interface')
const defaultFormat = { format: 'dag-cbor', hashAlg: 'sha2-256' }
/* ImmutableDB using IPLD (through IPFS) */
class IPLDStore {
constructor (ipfs) {
// super()
this._ipfs = ipfs
const transformCborLinksIntoCids = (data) => {
if (!data) {
return data
}
async put (value) {
const cid = await this._ipfs.dag.put(value, defaultFormat)
return cid.toBaseEncodedString()
if (data['/']) {
return new CID(data['/'])
}
async get (key) {
const result = await this._ipfs.dag.get(key)
return result.value
if (Array.isArray(data)) {
return data.map(transformCborLinksIntoCids)
}
if (typeof data === 'object') {
return Object.keys(data).reduce((obj, key) => {
obj[key] = transformCborLinksIntoCids(data[key])
return obj
}, {})
}
return data
}
const createMultihash = (data, hashAlg) => {
return new Promise((resolve, reject) => {
multihashing(data, hashAlg || defaultHashAlg, (err, multihash) => {
if (err)
return reject(err)
resolve(mh.toB58String(multihash))
})
})
}
// const LRU = require('lru')
// const ImmutableDB = require('./immutabledb-interface')
// const createMultihash = require('./create-multihash')
/* Memory store using an LRU cache */
class MemStore {
constructor () {
this._store = {}//new LRU(1000)
this._store = new Map()
}
async put (value) {
const data = value//new Buffer(JSON.stringify(value))
const hash = await createMultihash(data)
// console.log(this._store)
// this._store.set(hash, data)
if (!this._store) this._store = {}
// console.log(this._store)
// console.log(hash, data)
this._store[hash] = data
// return hash
return {
toJSON: () => {
return {
data: value,
multihash: hash,
}
}
}
const buffer = Buffer.from(JSON.stringify(value))
const multihash = await createMultihash(buffer, 'sha2-256')
const cid = new CID(1, 'dag-cbor', multihash)
const key = cid.toBaseEncodedString()
this._store.set(key, value)
return cid
}
async get (key) {
// const data = this._store.get(key)
const data = this._store[key]
async get (cid) {
if (CID.isCID(cid)) {
cid = cid.toBaseEncodedString()
}
// if (data) {
// const value = JSON.parse(data)
// return value
// }
const data = this._store.get(cid)
// return data
return {
toJSON: () => {
return {
data: this._store[key],
multihash: key,
}
}
value: transformCborLinksIntoCids(data)
}
}
}
module.exports = MemStore

View File

@ -23,19 +23,21 @@ const startIpfs = (type, config = {}) => {
IPFSFactory
.create(testAPIs[type])
.spawn(config, async (err, ipfsd) => {
if (err) {
reject(err)
if (err) {
reject(err)
}
// Monkey patch _peerInfo to the ipfs api/instance
// to make js-ipfs-api compatible with js-ipfs
// TODO: Get IPFS id via coherent API call (without it being asynchronous)
if (!ipfsd.api._peerInfo) {
let { id } = await ipfsd.api.id()
ipfsd.api._peerInfo = { id: { _idB58String: id } }
}
setTimeout(async () => {
if (!ipfsd.api._peerInfo) {
let { id } = await ipfsd.api.id()
ipfsd.api._peerInfo = { id: { _idB58String: id } }
}
resolve(ipfsd)
resolve(ipfsd)
}, 500)
})
})
}

105
test/v0-open-load.js Normal file
View File

@ -0,0 +1,105 @@
'use strict'
const assert = require('assert')
const mapSeries = require('p-map-series')
const fs = require('fs-extra')
const path = require('path')
const rmrf = require('rimraf')
const levelup = require('levelup')
const leveldown = require('leveldown')
const OrbitDB = require('../src/OrbitDB')
const OrbitDBAddress = require('../src/orbit-db-address')
const io = require('orbit-db-io')
const IPFS = require('ipfs')
const Identities = require('orbit-db-identity-provider')
const migrate = require('localstorage-level-migration')
// Include test utilities
const {
config,
startIpfs,
stopIpfs,
testAPIs,
} = require('./utils')
const dbPath = './orbitdb/tests/v0'
const keyFixtures = './test/fixtures/keys/QmRfPsKJs9YqTot5krRibra4gPwoK4kghhU8iKWxBjGDDX'
const dbFixturesDir = './test/fixtures/v0'
const ipfsFixturesDir = './test/fixtures/ipfs'
Object.keys(testAPIs).forEach(API => {
describe(`orbit-db - Backward-Compatibility - Open & Load (${API})`, function() {
this.timeout(config.timeout)
let ipfsd, ipfs, orbitdb, db, address
let localDataPath
before(async () => {
ipfsd = await startIpfs(API, config.daemon1)
ipfs = ipfsd.api
//copy data files to ipfs and orbitdb repos
await fs.copy(path.join(ipfsFixturesDir, 'blocks'), path.join(ipfsd.path, 'blocks'))
await fs.copy(path.join(ipfsFixturesDir, 'datastore'), path.join(ipfsd.path, 'datastore'))
await fs.copy(dbFixturesDir, dbPath)
let identity = await Identities.createIdentity({ id: ipfs._peerInfo.id._idB58String, migrate: migrate(keyFixtures), identityKeysPath: dbPath + '/keys' })
orbitdb = await OrbitDB.createInstance(ipfs, { directory: dbPath, identity })
})
after(async () => {
rmrf.sync(dbPath)
if(orbitdb)
await orbitdb.stop()
if (ipfsd)
await stopIpfs(ipfsd)
})
describe('Open & Load', function() {
before(async () => {
db = await orbitdb.open('/orbitdb/QmWDUfC4zcWJGgc9UHn1X3qQ5KZqBv4KCiCtjnpMmBT8JC/v0-db')
await db.load()
})
after(async () => {
if (db)
await db.close()
})
it('open v0 orbitdb address', async () => {
assert.notEqual(db, null)
})
it('database has the correct v0 address', async () => {
assert.equal(db.address.toString().indexOf('/orbitdb'), 0)
assert.equal(db.address.toString().indexOf('Qm'), 9)
assert.equal(db.address.toString().indexOf('v0-db'), 56)
})
it('has the correct type', async () => {
assert.equal(db.type, 'feed')
})
it('database has the correct access-controller', async () => {
assert.equal(db.options.accessControllerAddress, '/ipfs/Qmc3S7aMSmH8oGmx7Zdp8UxVWcDyCq5o2H9qYFgT3GW6nM')
assert.equal(db.access.type, 'ipfs')
assert.strictEqual(db.access.write[0], '04b54f6ef529cd2dd2f9c6897a382c492222d42e57826269a38101ffe752aa07260ecd092a970d7eef08c4ddae2b7006ee25f07e4ab62fa5262ae3b51fdea29f78')
})
it('load v0 orbitdb address', async () => {
assert.equal(db.all.length, 3)
})
it('allows migrated key to write', async () => {
const hash = await db.add({ thing: 'new addition'})
const newEntries = db.all.filter(e => e.v === 1)
assert.equal(newEntries.length, 1)
assert.strictEqual(newEntries[0].cid, hash)
})
})
})
})

View File

@ -2,6 +2,7 @@
const assert = require('assert')
const rmrf = require('rimraf')
const path = require('path')
const OrbitDB = require('../src/OrbitDB')
// Include test utilities
@ -10,7 +11,7 @@ const {
startIpfs,
stopIpfs,
testAPIs,
databases,
databases
} = require('./utils')
const dbPath = './orbitdb/tests/write-permissions'
@ -28,8 +29,8 @@ Object.keys(testAPIs).forEach(API => {
rmrf.sync(dbPath)
ipfsd = await startIpfs(API, config.daemon1)
ipfs = ipfsd.api
orbitdb1 = new OrbitDB(ipfs, dbPath + '/1')
orbitdb2 = new OrbitDB(ipfs, dbPath + '/2')
orbitdb1 = await OrbitDB.createInstance(ipfs, { directory: path.join(dbPath, '1') })
orbitdb2 = await OrbitDB.createInstance(ipfs, { directory: path.join(dbPath, '2') })
})
after(async () => {
@ -48,10 +49,12 @@ Object.keys(testAPIs).forEach(API => {
it(database.type + ' allows multiple writers', async () => {
let options = {
// Set write access for both clients
write: [
orbitdb1.key.getPublic('hex'),
orbitdb2.key.getPublic('hex')
],
accessController: {
write: [
orbitdb1.identity.publicKey,
orbitdb2.identity.publicKey
],
}
}
const db1 = await database.create(orbitdb1, 'sync-test', options)
@ -75,10 +78,12 @@ Object.keys(testAPIs).forEach(API => {
it(database.type + ' syncs', async () => {
let options = {
// Set write access for both clients
write: [
orbitdb1.key.getPublic('hex'),
orbitdb2.key.getPublic('hex')
],
accessController: {
write: [
orbitdb1.identity.publicKey,
orbitdb2.identity.publicKey
]
}
}
const db1 = await database.create(orbitdb1, 'sync-test', options)
@ -108,7 +113,9 @@ Object.keys(testAPIs).forEach(API => {
it(database.type + ' syncs', async () => {
let options = {
// Set write permission for everyone
write: ['*'],
accessController: {
write: ['*']
}
}
const db1 = await database.create(orbitdb1, 'sync-test-public-dbs', options)
@ -124,8 +131,8 @@ Object.keys(testAPIs).forEach(API => {
setTimeout(async () => {
const value = database.getTestValue(db1)
assert.deepEqual(value, database.expectedValue)
await db1.close()
await db2.close()
await db1.close()
await db2.close()
resolve()
}, 300)
})
@ -139,14 +146,14 @@ Object.keys(testAPIs).forEach(API => {
let options = {
// Only peer 1 can write
write: [orbitdb1.key.getPublic('hex')],
accessController: {
write: [orbitdb1.identity.publicKey]
}
}
let err
options = Object.assign({}, options, { path: dbPath + '/sync-test/1' })
options = Object.assign({}, options, { path: path.join(dbPath, '/sync-test/1') })
const db1 = await database.create(orbitdb1, 'write error test 1', options)
options = Object.assign({}, options, { path: dbPath + '/sync-test/2', sync: true })
options = Object.assign({}, options, { path: path.join(dbPath, '/sync-test/2'), sync: true })
const db2 = await database.create(orbitdb2, 'write error test 1', options)
try {
@ -156,8 +163,9 @@ Object.keys(testAPIs).forEach(API => {
await database.tryInsert(db2)
} catch (e) {
// Make sure peer 2's instance throws an error
assert.equal(e.toString(), 'Error: Not allowed to write')
err = e.toString()
}
assert.equal(err, `Error: Could not append entry, key "${orbitdb2.identity.id}" is not allowed to write to the log`)
// Make sure nothing was added to the database
assert.equal(database.query(db1).length, 0)
@ -170,10 +178,10 @@ Object.keys(testAPIs).forEach(API => {
setTimeout(async () => {
// Make sure nothing was added
assert.equal(database.query(db1).length, 0)
await db1.close()
await db2.close()
if (err) {
reject(err)
await db1.close()
await db2.close()
if (!err) {
reject(new Error('tryInsert should throw an err'))
} else {
resolve()
}
@ -188,7 +196,9 @@ Object.keys(testAPIs).forEach(API => {
it(database.type + ' throws an error', async () => {
let options = {
// No write access (only creator of the database can write)
write: [],
accessController: {
write: []
}
}
let err
@ -200,7 +210,7 @@ Object.keys(testAPIs).forEach(API => {
} catch (e) {
err = e.toString()
}
assert.equal(err, 'Error: Not allowed to write')
assert.equal(err, `Error: Could not append entry, key "${orbitdb2.identity.id}" is not allowed to write to the log`)
})
})
})