Merge pull request #335 from orbitdb/feat/tests-with-ipfsd-ctl

Use ipfsd-ctl to create test IPFS instances
Haad 2018-03-27 14:37:11 +02:00 committed by GitHub
commit 99c69b6b74
23 changed files with 2910 additions and 2424 deletions
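The changed test suites below all follow one pattern: each suite is repeated for every IPFS implementation listed in testAPIs, a daemon is spawned through the shared startIpfs helper (which wraps ipfsd-ctl), the tests talk to it through ipfsd.api, and the daemon is torn down with stopIpfs. A minimal sketch of that pattern, using only the helper names that appear in the diffs below; the suite name and database used here are placeholders:

'use strict'

const assert = require('assert')
const OrbitDB = require('../src/OrbitDB')

// config, startIpfs, stopIpfs and testAPIs are the utilities the updated
// tests import from './utils'; their implementation is not part of this diff.
const { config, startIpfs, stopIpfs, testAPIs } = require('./utils')

// Run the same suite once per IPFS implementation (e.g. js-ipfs and js-ipfs-api).
Object.keys(testAPIs).forEach(API => {
  describe(`example suite (${API})`, function () {
    this.timeout(config.timeout)

    let ipfsd, ipfs, orbitdb

    before(async () => {
      ipfsd = await startIpfs(API, config.daemon1) // spawn an IPFS daemon via ipfsd-ctl
      ipfs = ipfsd.api                             // use the API object like a regular IPFS instance
      orbitdb = new OrbitDB(ipfs, './orbitdb/example')
    })

    after(async () => {
      if (orbitdb) await orbitdb.stop()
      if (ipfsd) await stopIpfs(ipfsd)             // stop the spawned daemon, not ipfs.stop()
    })

    it('opens a database', async () => {
      const db = await orbitdb.eventlog('example')
      assert.notEqual(db, null)
    })
  })
})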

package-lock.json (generated), 232 changed lines

@ -295,6 +295,12 @@
"integrity": "sha512-jp/uFnooOiO+L211eZOoSyzpOITMXx1rBITauYykG3BRYPu8h0UcxsPNB04RR5vo4Tyz3+ay17tR6JVf9qzYWg==", "integrity": "sha512-jp/uFnooOiO+L211eZOoSyzpOITMXx1rBITauYykG3BRYPu8h0UcxsPNB04RR5vo4Tyz3+ay17tR6JVf9qzYWg==",
"dev": true "dev": true
}, },
"asynckit": {
"version": "0.4.0",
"resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
"integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k=",
"dev": true
},
"atob": { "atob": {
"version": "2.0.3", "version": "2.0.3",
"resolved": "https://registry.npmjs.org/atob/-/atob-2.0.3.tgz", "resolved": "https://registry.npmjs.org/atob/-/atob-2.0.3.tgz",
@ -1823,6 +1829,32 @@
"integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=", "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=",
"dev": true "dev": true
}, },
"comandante": {
"version": "0.0.1",
"resolved": "https://registry.npmjs.org/comandante/-/comandante-0.0.1.tgz",
"integrity": "sha1-gFHaYILocgiq3VK9gXJ1t8gDUAk=",
"dev": true,
"requires": {
"duplexer": "0.0.4"
},
"dependencies": {
"duplexer": {
"version": "0.0.4",
"resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.0.4.tgz",
"integrity": "sha1-r8t/H4uNdPggcmFx1dZKyeSo/yA=",
"dev": true
}
}
},
"combined-stream": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.6.tgz",
"integrity": "sha1-cj599ugBrFYTETp+RFqbactjKBg=",
"dev": true,
"requires": {
"delayed-stream": "1.0.0"
}
},
"commander": { "commander": {
"version": "2.15.1", "version": "2.15.1",
"resolved": "https://registry.npmjs.org/commander/-/commander-2.15.1.tgz", "resolved": "https://registry.npmjs.org/commander/-/commander-2.15.1.tgz",
@ -1956,6 +1988,12 @@
"integrity": "sha1-5+Ch+e9DtMi6klxcWpboBtFoc7s=", "integrity": "sha1-5+Ch+e9DtMi6klxcWpboBtFoc7s=",
"dev": true "dev": true
}, },
"cookiejar": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/cookiejar/-/cookiejar-2.1.1.tgz",
"integrity": "sha1-Qa1XsbVVlR7BcUEqgZQrHoIA00o=",
"dev": true
},
"copy-concurrently": { "copy-concurrently": {
"version": "1.0.5", "version": "1.0.5",
"resolved": "https://registry.npmjs.org/copy-concurrently/-/copy-concurrently-1.0.5.tgz", "resolved": "https://registry.npmjs.org/copy-concurrently/-/copy-concurrently-1.0.5.tgz",
@ -2257,6 +2295,12 @@
"isobject": "3.0.1" "isobject": "3.0.1"
} }
}, },
"delayed-stream": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
"integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=",
"dev": true
},
"delegates": { "delegates": {
"version": "1.0.0", "version": "1.0.0",
"resolved": "https://registry.npmjs.org/delegates/-/delegates-1.0.0.tgz", "resolved": "https://registry.npmjs.org/delegates/-/delegates-1.0.0.tgz",
@ -2393,6 +2437,12 @@
"create-hmac": "1.1.6" "create-hmac": "1.1.6"
} }
}, },
"duplexer": {
"version": "0.1.1",
"resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.1.tgz",
"integrity": "sha1-rOb/gIwc5mtX0ev5eXessCM0z8E=",
"dev": true
},
"duplexer3": { "duplexer3": {
"version": "0.1.4", "version": "0.1.4",
"resolved": "https://registry.npmjs.org/duplexer3/-/duplexer3-0.1.4.tgz", "resolved": "https://registry.npmjs.org/duplexer3/-/duplexer3-0.1.4.tgz",
@ -2962,6 +3012,12 @@
"resolved": "https://registry.npmjs.org/expand-template/-/expand-template-1.1.0.tgz", "resolved": "https://registry.npmjs.org/expand-template/-/expand-template-1.1.0.tgz",
"integrity": "sha512-kkjwkMqj0h4w/sb32ERCDxCQkREMCAgS39DscDnSwDsbxnwwM1BTZySdC3Bn1lhY7vL08n9GoO/fVTynjDgRyQ==" "integrity": "sha512-kkjwkMqj0h4w/sb32ERCDxCQkREMCAgS39DscDnSwDsbxnwwM1BTZySdC3Bn1lhY7vL08n9GoO/fVTynjDgRyQ=="
}, },
"extend": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/extend/-/extend-3.0.1.tgz",
"integrity": "sha1-p1Xqe8Gt/MWjHOfnYtuq3F5jZEQ=",
"dev": true
},
"extend-shallow": { "extend-shallow": {
"version": "3.0.2", "version": "3.0.2",
"resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz",
@ -3164,6 +3220,23 @@
"integrity": "sha1-gQaNKVqBQuwKxybG4iAMMPttXoA=", "integrity": "sha1-gQaNKVqBQuwKxybG4iAMMPttXoA=",
"dev": true "dev": true
}, },
"form-data": {
"version": "2.3.2",
"resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.2.tgz",
"integrity": "sha1-SXBJi+YEwgwAXU9cI67NIda0kJk=",
"dev": true,
"requires": {
"asynckit": "0.4.0",
"combined-stream": "1.0.6",
"mime-types": "2.1.18"
}
},
"formidable": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/formidable/-/formidable-1.2.1.tgz",
"integrity": "sha512-Fs9VRguL0gqGHkXS5GQiMCr1VhZBxz0JnJs4JmMp/2jL18Fmbzvv7vOFRU+U8TBkHEE/CX1qDXzJplVULgsLeg==",
"dev": true
},
"fragment-cache": { "fragment-cache": {
"version": "0.2.1", "version": "0.2.1",
"resolved": "https://registry.npmjs.org/fragment-cache/-/fragment-cache-0.2.1.tgz", "resolved": "https://registry.npmjs.org/fragment-cache/-/fragment-cache-0.2.1.tgz",
@ -5382,6 +5455,12 @@
"integrity": "sha1-EPIJmg18BaQPK+r1wdOc8vfavzY=", "integrity": "sha1-EPIJmg18BaQPK+r1wdOc8vfavzY=",
"dev": true "dev": true
}, },
"hat": {
"version": "0.0.3",
"resolved": "https://registry.npmjs.org/hat/-/hat-0.0.3.tgz",
"integrity": "sha1-uwFKnmSzeIrtgAWRdBPU/z1QLYo=",
"dev": true
},
"he": { "he": {
"version": "1.1.1", "version": "1.1.1",
"resolved": "https://registry.npmjs.org/he/-/he-1.1.1.tgz", "resolved": "https://registry.npmjs.org/he/-/he-1.1.1.tgz",
@ -5548,6 +5627,12 @@
"integrity": "sha1-gtwzbSMrkGIXnQWrMpOmYFn9Q10=", "integrity": "sha1-gtwzbSMrkGIXnQWrMpOmYFn9Q10=",
"dev": true "dev": true
}, },
"individual": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/individual/-/individual-2.0.0.tgz",
"integrity": "sha1-gzsJfa0jKU52EXqY+zjg2a1hu5c=",
"dev": true
},
"inert": { "inert": {
"version": "4.2.1", "version": "4.2.1",
"resolved": "https://registry.npmjs.org/inert/-/inert-4.2.1.tgz", "resolved": "https://registry.npmjs.org/inert/-/inert-4.2.1.tgz",
@ -6041,6 +6126,37 @@
}
}
},
"ipfsd-ctl": {
"version": "0.30.4",
"resolved": "https://registry.npmjs.org/ipfsd-ctl/-/ipfsd-ctl-0.30.4.tgz",
"integrity": "sha512-GZ8uzZ7AZ/DhgVQZ8f1j1sTuzOPMBAszDPd3p4ucxRd9kdlatxlFTaRyLlQB9/A+gx90SUQF8nKTqr5ZDWJ0yg==",
"dev": true,
"requires": {
"async": "2.6.0",
"boom": "7.2.0",
"debug": "3.1.0",
"detect-node": "2.0.3",
"hapi": "16.6.3",
"hat": "0.0.3",
"ipfs-api": "18.2.1",
"ipfs-repo": "0.18.7",
"joi": "13.1.2",
"lodash.clone": "4.5.0",
"lodash.defaults": "4.2.0",
"lodash.defaultsdeep": "4.6.0",
"multiaddr": "3.1.0",
"once": "1.4.0",
"readable-stream": "2.3.5",
"rimraf": "2.6.2",
"safe-json-parse": "4.0.0",
"safe-json-stringify": "1.1.0",
"shutdown": "0.3.0",
"stream-http": "2.8.1",
"subcomandante": "1.0.5",
"superagent": "3.8.2",
"truthy": "0.0.1"
}
},
"ipld": { "ipld": {
"version": "0.15.0", "version": "0.15.0",
"resolved": "https://registry.npmjs.org/ipld/-/ipld-0.15.0.tgz", "resolved": "https://registry.npmjs.org/ipld/-/ipld-0.15.0.tgz",
@ -6441,6 +6557,12 @@
"integrity": "sha1-EaBgVotnM5REAz0BJaYaINVk+zQ=", "integrity": "sha1-EaBgVotnM5REAz0BJaYaINVk+zQ=",
"dev": true "dev": true
}, },
"is-running": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/is-running/-/is-running-1.0.5.tgz",
"integrity": "sha1-4IikdniNqbE7kTxsgT1WQK1j0ig=",
"dev": true
},
"is-stream": { "is-stream": {
"version": "1.1.0", "version": "1.1.0",
"resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz",
@ -7244,6 +7366,12 @@
"integrity": "sha512-svL3uiZf1RwhH+cWrfZn3A4+U58wbP0tGVTLQPbjplZxZ8ROD9VLuNgsRniTlLe7OlSqR79RUehXgpBW/s0IQw==", "integrity": "sha512-svL3uiZf1RwhH+cWrfZn3A4+U58wbP0tGVTLQPbjplZxZ8ROD9VLuNgsRniTlLe7OlSqR79RUehXgpBW/s0IQw==",
"dev": true "dev": true
}, },
"lodash.clone": {
"version": "4.5.0",
"resolved": "https://registry.npmjs.org/lodash.clone/-/lodash.clone-4.5.0.tgz",
"integrity": "sha1-GVhwRQ9aExkkeN9Lw9I9LeoZB7Y=",
"dev": true
},
"lodash.clonedeep": { "lodash.clonedeep": {
"version": "4.5.0", "version": "4.5.0",
"resolved": "https://registry.npmjs.org/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz", "resolved": "https://registry.npmjs.org/lodash.clonedeep/-/lodash.clonedeep-4.5.0.tgz",
@ -7255,6 +7383,18 @@
"integrity": "sha1-gteb/zCmfEAF/9XiUVMArZyk168=", "integrity": "sha1-gteb/zCmfEAF/9XiUVMArZyk168=",
"dev": true "dev": true
}, },
"lodash.defaults": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/lodash.defaults/-/lodash.defaults-4.2.0.tgz",
"integrity": "sha1-0JF4cW/+pN3p5ft7N/bwgCJ0WAw=",
"dev": true
},
"lodash.defaultsdeep": {
"version": "4.6.0",
"resolved": "https://registry.npmjs.org/lodash.defaultsdeep/-/lodash.defaultsdeep-4.6.0.tgz",
"integrity": "sha1-vsECT4WxvZbL6kBbI8FK1kQ6b4E=",
"dev": true
},
"lodash.filter": { "lodash.filter": {
"version": "4.6.0", "version": "4.6.0",
"resolved": "https://registry.npmjs.org/lodash.filter/-/lodash.filter-4.6.0.tgz", "resolved": "https://registry.npmjs.org/lodash.filter/-/lodash.filter-4.6.0.tgz",
@ -7607,6 +7747,12 @@
}
}
},
"methods": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz",
"integrity": "sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4=",
"dev": true
},
"micromatch": { "micromatch": {
"version": "3.1.10", "version": "3.1.10",
"resolved": "https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz", "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz",
@ -7646,6 +7792,12 @@
"brorand": "1.1.0" "brorand": "1.1.0"
} }
}, },
"mime": {
"version": "1.6.0",
"resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz",
"integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==",
"dev": true
},
"mime-db": { "mime-db": {
"version": "1.33.0", "version": "1.33.0",
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.33.0.tgz", "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.33.0.tgz",
@ -9489,11 +9641,35 @@
"aproba": "1.2.0" "aproba": "1.2.0"
} }
}, },
"rust-result": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/rust-result/-/rust-result-1.0.0.tgz",
"integrity": "sha1-NMdbLm3Dn+WHXlveyFteD5FTb3I=",
"dev": true,
"requires": {
"individual": "2.0.0"
}
},
"safe-buffer": { "safe-buffer": {
"version": "5.1.1", "version": "5.1.1",
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.1.tgz", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.1.tgz",
"integrity": "sha512-kKvNJn6Mm93gAczWVJg7wH+wGYWNrDHdWvpUmHyEsgCtIwwo3bqPtV4tR5tuPaUhTOo/kvhVwd8XwwOllGYkbg==" "integrity": "sha512-kKvNJn6Mm93gAczWVJg7wH+wGYWNrDHdWvpUmHyEsgCtIwwo3bqPtV4tR5tuPaUhTOo/kvhVwd8XwwOllGYkbg=="
}, },
"safe-json-parse": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/safe-json-parse/-/safe-json-parse-4.0.0.tgz",
"integrity": "sha1-fA9XjPzNEtM6ccDgVBPi7KFx6qw=",
"dev": true,
"requires": {
"rust-result": "1.0.0"
}
},
"safe-json-stringify": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/safe-json-stringify/-/safe-json-stringify-1.1.0.tgz",
"integrity": "sha512-EzBtUaFH9bHYPc69wqjp0efJI/DPNHdFbGE3uIMn4sVbO0zx8vZ8cG4WKxQfOpUOKsQyGBiT2mTqnCw+6nLswA==",
"dev": true
},
"safe-regex": { "safe-regex": {
"version": "1.1.0", "version": "1.1.0",
"resolved": "https://registry.npmjs.org/safe-regex/-/safe-regex-1.1.0.tgz", "resolved": "https://registry.npmjs.org/safe-regex/-/safe-regex-1.1.0.tgz",
@ -9668,6 +9844,27 @@
}
}
},
"shutdown": {
"version": "0.3.0",
"resolved": "https://registry.npmjs.org/shutdown/-/shutdown-0.3.0.tgz",
"integrity": "sha1-AmlEfnzQ+AW6TpxNj5N9LbPvtZQ=",
"dev": true,
"requires": {
"async": "2.6.0",
"debug": "2.6.9"
},
"dependencies": {
"debug": {
"version": "2.6.9",
"resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
"integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
"dev": true,
"requires": {
"ms": "2.0.0"
}
}
}
},
"signal-exit": { "signal-exit": {
"version": "3.0.2", "version": "3.0.2",
"resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.2.tgz", "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.2.tgz",
@ -10322,6 +10519,17 @@
"resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz",
"integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo=" "integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo="
}, },
"subcomandante": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/subcomandante/-/subcomandante-1.0.5.tgz",
"integrity": "sha1-JoL4/FMbik+u3xWnV4lkp9YmCVk=",
"dev": true,
"requires": {
"comandante": "0.0.1",
"duplexer": "0.1.1",
"is-running": "1.0.5"
}
},
"subtext": { "subtext": {
"version": "5.0.0", "version": "5.0.0",
"resolved": "https://registry.npmjs.org/subtext/-/subtext-5.0.0.tgz", "resolved": "https://registry.npmjs.org/subtext/-/subtext-5.0.0.tgz",
@ -10352,6 +10560,24 @@
}
}
},
"superagent": {
"version": "3.8.2",
"resolved": "https://registry.npmjs.org/superagent/-/superagent-3.8.2.tgz",
"integrity": "sha512-gVH4QfYHcY3P0f/BZzavLreHW3T1v7hG9B+hpMQotGQqurOvhv87GcMCd6LWySmBuf+BDR44TQd0aISjVHLeNQ==",
"dev": true,
"requires": {
"component-emitter": "1.2.1",
"cookiejar": "2.1.1",
"debug": "3.1.0",
"extend": "3.0.1",
"form-data": "2.3.2",
"formidable": "1.2.1",
"methods": "1.1.2",
"mime": "1.6.0",
"qs": "6.5.1",
"readable-stream": "2.3.5"
}
},
"supports-color": { "supports-color": {
"version": "2.0.0", "version": "2.0.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz",
@ -10587,6 +10813,12 @@
"utf8-byte-length": "1.0.4" "utf8-byte-length": "1.0.4"
} }
}, },
"truthy": {
"version": "0.0.1",
"resolved": "https://registry.npmjs.org/truthy/-/truthy-0.0.1.tgz",
"integrity": "sha1-eJ8zBZ3B8C/dkM8FY6yxpnZvx7w=",
"dev": true
},
"tty-browserify": { "tty-browserify": {
"version": "0.0.0", "version": "0.0.0",
"resolved": "https://registry.npmjs.org/tty-browserify/-/tty-browserify-0.0.0.tgz", "resolved": "https://registry.npmjs.org/tty-browserify/-/tty-browserify-0.0.0.tgz",


@ -32,8 +32,9 @@
"babel-polyfill": "^6.26.0", "babel-polyfill": "^6.26.0",
"babel-preset-es2015": "^6.24.1", "babel-preset-es2015": "^6.24.1",
"datastore-level": "~0.7.0", "datastore-level": "~0.7.0",
"ipfs": "~0.28.0", "ipfs": "~0.28.2",
"ipfs-repo": "~0.18.0", "ipfs-repo": "~0.18.7",
"ipfsd-ctl": "~0.30.3",
"mocha": "^4.0.1", "mocha": "^4.0.1",
"p-each-series": "^1.0.0", "p-each-series": "^1.0.0",
"p-map-series": "^1.0.0", "p-map-series": "^1.0.0",


@ -374,12 +374,8 @@ class OrbitDB {
  databaseTypes[type] = store
}

- static create () {
-   return new Error('Not implemented yet!')
- }
- static open () {
-   return new Error('Not implemented yet!')
+ static getDatabaseTypes () {
+   return databaseTypes
  }
}
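With the unimplemented create/open stubs removed, the new static getDatabaseTypes() accessor exposes the type registry directly. A small usage sketch; the CustomStore class here is a stand-in mirroring the one defined in the custom-type test further down, and the type name is a placeholder:

const OrbitDB = require('./src/OrbitDB')
const DocumentStore = require('orbit-db-docstore')

// Stand-in custom store; the real test defines its own CustomStore with a static `type`.
class CustomStore extends DocumentStore {
  static get type () { return 'custom-docstore' }
}

// Register the type, inspect the registry through the new accessor,
// and unregister it again, as the custom-type test does between runs.
OrbitDB.addDatabaseType(CustomStore.type, CustomStore)
console.log(Object.keys(OrbitDB.getDatabaseTypes())) // includes 'custom-docstore'
delete OrbitDB.getDatabaseTypes()[CustomStore.type]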


@ -1,115 +1,137 @@
'use strict'

const assert = require('assert')
const mapSeries = require('p-each-series')
const path = require('path')
const rmrf = require('rimraf')
const OrbitDB = require('../src/OrbitDB')

// Include test utilities
const {
  config,
  startIpfs,
  stopIpfs,
  testAPIs,
  connectPeers,
  waitForPeers,
} = require('./utils')

const dbPath1 = './orbitdb/tests/counters/peer1'
const dbPath2 = './orbitdb/tests/counters/peer2'
const ipfsPath1 = './orbitdb/tests/counters/peer1/ipfs'
const ipfsPath2 = './orbitdb/tests/counters/peer2/ipfs'

Object.keys(testAPIs).forEach(API => {
  describe(`orbit-db - Counters (${API})`, function() {
    this.timeout(config.timeout)

    let orbitdb1, orbitdb2
    let ipfsd1, ipfsd2, ipfs1, ipfs2

    before(async () => {
      rmrf.sync(dbPath1)
      rmrf.sync(dbPath2)
      config.daemon1.repo = ipfsPath1
      config.daemon2.repo = ipfsPath2
      ipfsd1 = await startIpfs(API, config.daemon1)
      ipfsd2 = await startIpfs(API, config.daemon2)
      ipfs1 = ipfsd1.api
      ipfs2 = ipfsd2.api
      // Connect the peers manually to speed up test times
      await connectPeers(ipfs1, ipfs2)
    })

    after(async () => {
      if (orbitdb1)
        await orbitdb1.stop()

      if (orbitdb2)
        await orbitdb2.stop()

      if (ipfsd1)
        await stopIpfs(ipfsd1)

      if (ipfsd2)
        await stopIpfs(ipfsd2)
    })

    beforeEach(() => {
      orbitdb1 = new OrbitDB(ipfs1, './orbitdb/1')
      orbitdb2 = new OrbitDB(ipfs2, './orbitdb/2')
    })

    afterEach(async () => {
      if (orbitdb1)
        await orbitdb1.stop()

      if (orbitdb2)
        await orbitdb2.stop()
    })

    describe('counters', function() {
      let address

      it('creates and opens a database', async () => {
        const db = await orbitdb1.counter('counter database')
        assert.notEqual(db, null)
        assert.equal(db.type, 'counter')
        assert.equal(db.dbname, 'counter database')
      })

      it('value is undefined when it\'s a fresh database', async () => {
        const db = await orbitdb1.feed('counter database')
        assert.equal(db.value, undefined)
      })

      it('increases a counter value', async () => {
        const counter = await orbitdb1.counter('counter test', { path: dbPath1 })
        address = counter.address.toString()
        await mapSeries([13, 1], (f) => counter.inc(f))
        assert.equal(counter.value, 14)
        await counter.close()
      })

      it('opens a saved counter', async () => {
        const counter = await orbitdb1.counter(address, { path: dbPath1 })
        await counter.load()
        assert.equal(counter.value, 14)
        await counter.close()
      })

      it('syncs counters', async () => {
        let options = {
          // Set write access for both clients
          write: [
            orbitdb1.key.getPublic('hex'),
            orbitdb2.key.getPublic('hex')
          ],
        }

        const numbers = [[13, 10], [2, 5]]
        const increaseCounter = (counterDB, i) => mapSeries(numbers[i], n => counterDB.inc(n))

        // Create a new counter database in the first client
        options = Object.assign({}, options, { path: dbPath1 })
        const counter1 = await orbitdb1.counter(new Date().getTime().toString(), options)

        // Open the database in the second client
        options = Object.assign({}, options, { path: dbPath2, sync: true })
        const counter2 = await orbitdb2.counter(counter1.address.toString(), options)

        // Wait for peers to connect first
        await waitForPeers(ipfs1, [orbitdb2.id], counter1.address.toString())
        await waitForPeers(ipfs2, [orbitdb1.id], counter1.address.toString())

        // Increase the counters sequentially
        await mapSeries([counter1, counter2], increaseCounter)

        return new Promise(resolve => {
          // Wait for a while to make sure db's have been synced
          setTimeout(() => {
            assert.equal(counter1.value, 30)
            assert.equal(counter2.value, 30)
            resolve()
          }, 1000)
        })
      })
    })
  })
})


@ -1,258 +1,268 @@
'use strict'

const assert = require('assert')
const fs = require('fs')
const path = require('path')
const rmrf = require('rimraf')
const mapSeries = require('p-map-series')
const levelup = require('levelup')
const leveldown = require('leveldown')
const OrbitDB = require('../src/OrbitDB')
const OrbitDBAddress = require('../src/orbit-db-address')

// Include test utilities
const {
  config,
  startIpfs,
  stopIpfs,
  testAPIs,
} = require('./utils')

const dbPath = './orbitdb/tests/create-open'
const ipfsPath = './orbitdb/tests/create-open/ipfs'

Object.keys(testAPIs).forEach(API => {
  describe(`orbit-db - Create & Open (${API})`, function() {
    this.timeout(config.timeout)

    let ipfsd, ipfs, orbitdb, db, address
    let localDataPath

    before(async () => {
      config.daemon1.repo = ipfsPath
      rmrf.sync(config.daemon1.repo)
      rmrf.sync(dbPath)
      ipfsd = await startIpfs(API, config.daemon1)
      ipfs = ipfsd.api
      orbitdb = new OrbitDB(ipfs, dbPath)
    })

    after(async () => {
      if(orbitdb)
        await orbitdb.stop()

      if (ipfsd)
        await stopIpfs(ipfsd)
    })

    describe('Create', function() {
      describe('Errors', function() {
        it('throws an error if given an invalid database type', async () => {
          let err
          try {
            db = await orbitdb.create('first', 'invalid-type')
          } catch (e) {
            err = e.toString()
          }
          assert.equal(err, 'Error: Invalid database type \'invalid-type\'')
        })

        it('throws an error if given an address instead of name', async () => {
          let err
          try {
            db = await orbitdb.create('/orbitdb/Qmc9PMho3LwTXSaUXJ8WjeBZyXesAwUofdkGeadFXsqMzW/first', 'feed')
          } catch (e) {
            err = e.toString()
          }
          assert.equal(err, 'Error: Given database name is an address. Please give only the name of the database!')
        })

        it('throws an error if database already exists', async () => {
          let err
          try {
            db = await orbitdb.create('first', 'feed', { replicate: false })
            db = await orbitdb.create('first', 'feed', { replicate: false })
          } catch (e) {
            err = e.toString()
          }
          assert.equal(err, `Error: Database '${db.address}' already exists!`)
        })

        it('throws an error if database type doesn\'t match', async () => {
          let err, log, kv
          try {
            log = await orbitdb.kvstore('keyvalue', { replicate: false })
            kv = await orbitdb.eventlog(log.address.toString())
          } catch (e) {
            err = e.toString()
          }
          assert.equal(err, `Error: Database '${log.address}' is type 'keyvalue' but was opened as 'eventlog'`)
        })
      })

      describe('Success', function() {
        before(async () => {
          db = await orbitdb.create('second', 'feed', { replicate: false })
          localDataPath = path.join(dbPath, db.address.root, db.address.path)
          await db.close()
        })

        it('creates a feed database', async () => {
          assert.notEqual(db, null)
        })

        it('database has the correct address', async () => {
          assert.equal(db.address.toString().indexOf('/orbitdb'), 0)
          assert.equal(db.address.toString().indexOf('Qm'), 9)
          assert.equal(db.address.toString().indexOf('second'), 56)
        })

        it('saves the database locally', async () => {
          assert.equal(fs.existsSync(localDataPath), true)
        })

        it('saves database manifest reference locally', async () => {
          const manifestHash = db.address.root
          const address = db.address.toString()
          levelup(localDataPath, (err, db) => {
            if (err) {
              assert.equal(err, null)
            }
            db.get(address + '/_manifest', (err, value) => {
              if (err) {
                assert.equal(err, null)
              }
              const data = JSON.parse(value || '{}')
              assert.equal(data, manifestHash)
            })
          })
        })

        it('saves database manifest file locally', async () => {
          const dag = await ipfs.object.get(db.address.root)
          const manifest = JSON.parse(dag.toJSON().data)
          assert.notEqual(manifest, )
          assert.equal(manifest.name, 'second')
          assert.equal(manifest.type, 'feed')
          assert.notEqual(manifest.accessController, null)
          assert.equal(manifest.accessController.indexOf('/ipfs'), 0)
        })

        it('can pass local database directory as an option', async () => {
          const dir = './orbitdb/tests/another-feed'
          db = await orbitdb.create('third', 'feed', { directory: dir })
          localDataPath = path.join(dir, db.address.root, db.address.path)
          assert.equal(fs.existsSync(localDataPath), true)
        })

        describe('Access Controller', function() {
          before(async () => {
            if (db) {
              await db.close()
              await db.drop()
            }
          })

          afterEach(async () => {
            if (db) {
              await db.close()
              await db.drop()
            }
          })

          it('creates an access controller and adds ourselves as writer by default', async () => {
            db = await orbitdb.create('fourth', 'feed')
            assert.deepEqual(db.access.write, [orbitdb.key.getPublic('hex')])
          })

          it('creates an access controller and adds writers', async () => {
            db = await orbitdb.create('fourth', 'feed', { write: ['another-key', 'yet-another-key', orbitdb.key.getPublic('hex')] })
            assert.deepEqual(db.access.write, ['another-key', 'yet-another-key', orbitdb.key.getPublic('hex')])
          })

          it('creates an access controller and doesn\'t add an admin', async () => {
            db = await orbitdb.create('sixth', 'feed')
            assert.deepEqual(db.access.admin, [])
          })

          it('creates an access controller and doesn\'t add read access keys', async () => {
            db = await orbitdb.create('seventh', 'feed', { read: ['one', 'two'] })
            assert.deepEqual(db.access.read, [])
          })
        })
      })
    })

    describe('Open', function() {
      before(async () => {
        db = await orbitdb.open('abc', { create: true, type: 'feed' })
      })

      it('throws an error if trying to open a database with name only and \'create\' is not set to \'true\'', async () => {
        let err
        try {
          db = await orbitdb.open('XXX', { create: false })
        } catch (e) {
          err = e.toString()
        }
        assert.equal(err, "Error: 'options.create' set to 'false'. If you want to create a database, set 'options.create' to 'true'.")
      })

      it('throws an error if trying to open a database with name only and \'create\' is not set to true', async () => {
        let err
        try {
          db = await orbitdb.open('YYY', { create: true })
        } catch (e) {
          err = e.toString()
        }
        assert.equal(err, `Error: Database type not provided! Provide a type with 'options.type' (${OrbitDB.databaseTypes.join('|')})`)
      })

      it('opens a database - name only', async () => {
        db = await orbitdb.open('abc', { create: true, type: 'feed', overwrite: true })
        assert.equal(db.address.toString().indexOf('/orbitdb'), 0)
        assert.equal(db.address.toString().indexOf('Qm'), 9)
        assert.equal(db.address.toString().indexOf('abc'), 56)
      })

      it('opens the same database - from an address', async () => {
        db = await orbitdb.open(db.address)
        assert.equal(db.address.toString().indexOf('/orbitdb'), 0)
        assert.equal(db.address.toString().indexOf('Qm'), 9)
        assert.equal(db.address.toString().indexOf('abc'), 56)
      })

      it('opens a database and adds the creator as the only writer', async () => {
        db = await orbitdb.open('abc', { create: true, type: 'feed', overwrite: true, write: [] })
        assert.equal(db.access.write.length, 1)
        assert.equal(db.access.write[0], db.key.getPublic('hex'))
      })

      it('doesn\'t open a database if we don\'t have it locally', async () => {
        const address = new OrbitDBAddress(db.address.root.slice(0, -1) + 'A', 'non-existent')
        return new Promise((resolve, reject) => {
          setTimeout(resolve, 900)
          orbitdb.open(address)
            .then(() => reject(new Error('Shouldn\'t open the database')))
        })
      })

      it('throws an error if trying to open a database locally and we don\'t have it', () => {
        const address = new OrbitDBAddress(db.address.root.slice(0, -1) + 'A', 'second')
        return orbitdb.open(address, { localOnly: true })
          .then(() => new Error('Shouldn\'t open the database'))
          .catch(e => {
            assert.equal(e.toString(), `Error: Database '${address}' doesn't exist!`)
          })
      })

      it('open the database and it has the added entries', async () => {
        db = await orbitdb.open('ZZZ', { create: true, type: 'feed' })
        await db.add('hello1')
        await db.add('hello2')

        db = await orbitdb.open(db.address)
        await db.load()
        const res = db.iterator({ limit: -1 }).collect()

        assert.equal(res.length, 2)
        assert.equal(res[0].payload.value, 'hello1')
        assert.equal(res[1].payload.value, 'hello2')
      })
    })
  })
})


@ -1,16 +1,21 @@
'use strict'

const assert = require('assert')
const rmrf = require('rimraf')
const DocumentStore = require('orbit-db-docstore')
const OrbitDB = require('../src/OrbitDB')

// Include test utilities
const {
  config,
  startIpfs,
  stopIpfs,
  testAPIs,
} = require('./utils')

const dbPath = './orbitdb/tests/create-open'
const ipfsPath = './orbitdb/tests/create-open/ipfs'

class CustomStore extends DocumentStore {
  constructor (ipfs, id, dbname, options) {
    super(ipfs, id, dbname, options)
@ -22,38 +27,44 @@ class CustomStore extends DocumentStore {
  }
}

Object.keys(testAPIs).forEach(API => {
  describe(`orbit-db - Create Custom Database Type (${API})`, function() {
    this.timeout(config.timeout)

    let ipfsd, ipfs, orbitdb

    before(async () => {
      config.daemon1.repo = ipfsPath
      rmrf.sync(config.daemon1.repo)
      rmrf.sync(dbPath)
      ipfsd = await startIpfs(API, config.daemon1)
      ipfs = ipfsd.api
      orbitdb = new OrbitDB(ipfs, dbPath)
    })

    after(async () => {
      if (orbitdb) await orbitdb.stop()
      if (ipfsd) await stopIpfs(ipfsd)
      // Remove the added custom database type from OrbitDB
      // between js-ipfs and js-ipfs-api tests
      delete OrbitDB.getDatabaseTypes()[CustomStore.type]
    })

    describe('addDatabaseType', function () {
      it('should have the correct custom type', async () => {
        OrbitDB.addDatabaseType(CustomStore.type, CustomStore)
        let store = await orbitdb.create(dbPath, CustomStore.type)
        assert.equal(store._type, CustomStore.type)
      })

      it('cannot be overwritten', async () => {
        try {
          OrbitDB.addDatabaseType(CustomStore.type, CustomStore)
          throw new Error('This should not run.')
        } catch (e) {
          assert(e.message.indexOf('already exists') > -1)
        }
      })
    })
  })
})


@ -3,9 +3,14 @@
const assert = require('assert')
const rmrf = require('rimraf')
const OrbitDB = require('../src/OrbitDB')

// Include test utilities
const {
  config,
  startIpfs,
  stopIpfs,
  testAPIs,
  CustomTestKeystore,
} = require('./utils')

const dbPath = './orbitdb/tests/customKeystore'
const ipfsPath = './orbitdb/tests/customKeystore/ipfs'
@ -53,58 +58,61 @@ const databases = [
  },
]

Object.keys(testAPIs).forEach(API => {
  describe(`orbit-db - Use a Custom Keystore (${API})`, function() {
    this.timeout(20000)

    let ipfsd, ipfs, orbitdb1

    before(async () => {
      config.daemon1.repo = ipfsPath
      rmrf.sync(config.daemon1.repo)
      rmrf.sync(dbPath)
      ipfsd = await startIpfs(API, config.daemon1)
      ipfs = ipfsd.api
      orbitdb1 = new OrbitDB(ipfs, dbPath + '/1', {
        keystore: CustomTestKeystore
      })
    })

    after(async () => {
      if(orbitdb1)
        await orbitdb1.stop()

      if (ipfsd)
        await stopIpfs(ipfsd)
    })

    describe('allows orbit to use a custom keystore with different store types', function() {
      databases.forEach(async (database) => {
        it(database.type + ' allows custom keystore', async () => {
          const db1 = await database.create(orbitdb1, 'custom-keystore')
          await database.tryInsert(db1)

          assert.deepEqual(database.getTestValue(db1), database.expectedValue)

          await db1.close()
        })
      })
    })

    describe('allows a custom keystore to be used with different store and write permissions', function() {
      databases.forEach(async (database) => {
        it(database.type + ' allows custom keystore', async () => {
          const options = {
            // Set write access for both clients
            write: [
              orbitdb1.key.getPublic('hex')
            ],
          }

          const db1 = await database.create(orbitdb1, 'custom-keystore', options)
          await database.tryInsert(db1)

          assert.deepEqual(database.getTestValue(db1), database.expectedValue)

          await db1.close()
        })
      })
    })
  })
})


@ -3,160 +3,171 @@
const assert = require('assert')
const rmrf = require('rimraf')
const OrbitDB = require('../src/OrbitDB')

// Include test utilities
const {
  config,
  startIpfs,
  stopIpfs,
  testAPIs,
} = require('./utils')

const dbPath = './orbitdb/tests/docstore'
const ipfsPath = './orbitdb/tests/docstore/ipfs'

Object.keys(testAPIs).forEach(API => {
  describe(`orbit-db - Document Store (${API})`, function() {
    this.timeout(config.timeout)

    let ipfsd, ipfs, orbitdb1, db

    before(async () => {
      config.daemon1.repo = ipfsPath
      rmrf.sync(config.daemon1.repo)
      ipfsd = await startIpfs(API, config.daemon1)
      ipfs = ipfsd.api
      orbitdb1 = new OrbitDB(ipfs, dbPath + '/1')
    })

    after(async () => {
      if(orbitdb1)
        await orbitdb1.stop()

      if (ipfsd)
        await stopIpfs(ipfsd)
    })

    it('creates and opens a database', async () => {
      db = await orbitdb1.docstore('first doc database')
      assert.notEqual(db, null)
      assert.equal(db.type, 'docstore')
      assert.equal(db.dbname, 'first doc database')
    })

    describe('Default index \'_id\'', function() {
      beforeEach(async () => {
        const options = {
          replicate: false,
          maxHistory: 0,
          path: dbPath,
        }
        db = await orbitdb1.docstore(config.dbname, options)
      })

      afterEach(async () => {
        await db.drop()
      })

      it('put', async () => {
        const doc = { _id: 'hello world', doc: 'all the things'}
        await db.put(doc)
        const value = db.get('hello world')
        assert.deepEqual(value, [doc])
      })

      it('get - partial term match', async () => {
        const doc1 = { _id: 'hello world', doc: 'some things'}
        const doc2 = { _id: 'hello universe', doc: 'all the things'}
        const doc3 = { _id: 'sup world', doc: 'other things'}
        await db.put(doc1)
        await db.put(doc2)
        await db.put(doc3)
        const value = db.get('hello')
        assert.deepEqual(value, [doc1, doc2])
      })

      it('get after delete', async () => {
        const doc1 = { _id: 'hello world', doc: 'some things'}
        const doc2 = { _id: 'hello universe', doc: 'all the things'}
        const doc3 = { _id: 'sup world', doc: 'other things'}
        await db.put(doc1)
        await db.put(doc2)
        await db.put(doc3)
        await db.del('hello universe')
        const value1 = db.get('hello')
        const value2 = db.get('sup')
        assert.deepEqual(value1, [doc1])
        assert.deepEqual(value2, [doc3])
      })

      it('put updates a value', async () => {
        const doc1 = { _id: 'hello world', doc: 'all the things'}
        const doc2 = { _id: 'hello world', doc: 'some of the things'}
        await db.put(doc1)
        await db.put(doc2)
        const value = db.get('hello')
        assert.deepEqual(value, [doc2])
      })

      it('query', async () => {
        const doc1 = { _id: 'hello world', doc: 'all the things', views: 17}
        const doc2 = { _id: 'sup world', doc: 'some of the things', views: 10}
        const doc3 = { _id: 'hello other world', doc: 'none of the things', views: 5}
        const doc4 = { _id: 'hey universe', doc: ''}

        await db.put(doc1)
        await db.put(doc2)
        await db.put(doc3)
        await db.put(doc4)

        const value1 = db.query((e) => e.views > 5)
        const value2 = db.query((e) => e.views > 10)
        const value3 = db.query((e) => e.views > 17)

        assert.deepEqual(value1, [doc1, doc2])
        assert.deepEqual(value2, [doc1])
        assert.deepEqual(value3, [])
      })

      it('query after delete', async () => {
        const doc1 = { _id: 'hello world', doc: 'all the things', views: 17}
        const doc2 = { _id: 'sup world', doc: 'some of the things', views: 10}
        const doc3 = { _id: 'hello other world', doc: 'none of the things', views: 5}
        const doc4 = { _id: 'hey universe', doc: ''}
        await db.put(doc1)
        await db.put(doc2)
        await db.put(doc3)
        await db.del('hello world')
        await db.put(doc4)
        const value1 = db.query((e) => e.views >= 5)
        const value2 = db.query((e) => e.views >= 10)
        assert.deepEqual(value1, [doc2, doc3])
        assert.deepEqual(value2, [doc2])
      })
    })

    describe('Specified index', function() {
      beforeEach(async () => {
        const options = {
          indexBy: 'doc',
          replicate: false,
          maxHistory: 0
        }
        db = await orbitdb1.docstore(config.dbname, options)
      })

      afterEach(async () => {
        await db.drop()
      })

      it('put', async () => {
        const doc = { _id: 'hello world', doc: 'all the things'}
        await db.put(doc)
        const value = db.get('all')
        assert.deepEqual(value, [doc])
      })

      it('get - matches specified index', async () => {
        const doc1 = { _id: 'hello world', doc: 'all the things'}
        const doc2 = { _id: 'hello world', doc: 'some things'}
        await db.put(doc1)
        await db.put(doc2)
        const value1 = db.get('all')
        const value2 = db.get('some')
        assert.deepEqual(value1, [doc1])
        assert.deepEqual(value2, [doc2])
      })
    })
  })
})


@ -5,46 +5,55 @@ const fs = require('fs')
const path = require('path')
const rmrf = require('rimraf')
const OrbitDB = require('../src/OrbitDB')

// Include test utilities
const {
  config,
  startIpfs,
  stopIpfs,
  testAPIs,
} = require('./utils')

const dbPath = './orbitdb/tests/drop'
const ipfsPath = './orbitdb/tests/drop/ipfs'

Object.keys(testAPIs).forEach(API => {
  describe(`orbit-db - Drop Database (${API})`, function() {
    this.timeout(config.timeout)

    let ipfsd, ipfs, orbitdb, db, address
    let localDataPath

    before(async () => {
      config.daemon1.repo = ipfsPath
      rmrf.sync(config.daemon1.repo)
      rmrf.sync(dbPath)
      ipfsd = await startIpfs(API, config.daemon1)
      ipfs = ipfsd.api
      orbitdb = new OrbitDB(ipfs, dbPath)
    })

    after(async () => {
      if(orbitdb)
        await orbitdb.stop()

      if (ipfsd)
        await stopIpfs(ipfsd)

      rmrf.sync(dbPath)
    })

    describe('Drop', function() {
      before(async () => {
        db = await orbitdb.create('first', 'feed')
        localDataPath = path.join(dbPath, db.address.root, db.address.path)
        assert.equal(fs.existsSync(localDataPath), true)
      })

      it('removes local database files', async () => {
        await db.drop()
        assert.equal(fs.existsSync(localDataPath), false)
      })
    })
  })
})


@ -1,346 +1,363 @@
'use strict' 'use strict'
const assert = require('assert') const assert = require('assert')
const rmrf = require('rimraf')
const mapSeries = require('p-map-series') const mapSeries = require('p-map-series')
const rmrf = require('rimraf')
const OrbitDB = require('../src/OrbitDB') const OrbitDB = require('../src/OrbitDB')
const config = require('./utils/config')
const startIpfs = require('./utils/start-ipfs') // Include test utilities
const {
config,
startIpfs,
stopIpfs,
testAPIs,
} = require('./utils')
const last = arr => arr[arr.length - 1] const last = arr => arr[arr.length - 1]
const dbPath = './orbitdb/tests/eventlog' const dbPath = './orbitdb/tests/eventlog'
const ipfsPath = './orbitdb/tests/eventlog/ipfs' const ipfsPath = './orbitdb/tests/eventlog/ipfs'
describe('orbit-db - Eventlog', function() { Object.keys(testAPIs).forEach(API => {
this.timeout(config.timeout) describe(`orbit-db - Log Database (${API})`, function() {
this.timeout(config.timeout)
let ipfs, orbitdb1, db let ipfsd, ipfs, orbitdb1, db
before(async () => {
config.daemon1.repo = ipfsPath
rmrf.sync(config.daemon1.repo)
rmrf.sync(dbPath)
ipfs = await startIpfs(config.daemon1)
orbitdb1 = new OrbitDB(ipfs, dbPath + '/1')
})
after(async () => {
if(orbitdb1)
await orbitdb1.stop()
if (ipfs)
await ipfs.stop()
})
describe('Eventlog', function () {
it('creates and opens a database', async () => {
db = await orbitdb1.eventlog('first database')
db = await orbitdb1.eventlog('first database')
const items = db.iterator({ limit: -1 }).collect()
assert.equal(items.length, 0)
})
it('returns the added entry\'s hash, 1 entry', async () => {
db = await orbitdb1.eventlog('first database')
const hash = await db.add('hello1')
const items = db.iterator({ limit: -1 }).collect()
assert.notEqual(hash, null)
assert.equal(hash, last(items).hash)
assert.equal(items.length, 1)
})
it('returns the added entry\'s hash, 2 entries', async () => {
const prevHash = db.iterator().collect()[0].hash
const hash = await db.add('hello2')
const items = db.iterator({ limit: -1 }).collect()
assert.equal(items.length, 2)
assert.notEqual(hash, null)
assert.notEqual(hash, prevHash)
assert.equal(hash, last(items).hash)
})
it('adds five items', async () => {
db = await orbitdb1.eventlog('second database')
await mapSeries([1, 2, 3, 4, 5], (i) => db.add('hello' + i))
const items = db.iterator({ limit: -1 }).collect()
assert.equal(items.length, 5)
assert.equal(items[0].payload.value, 'hello1')
assert.equal(last(items.map((f) => f.payload.value)), 'hello5')
})
it('adds an item that is > 256 bytes', async () => {
db = await orbitdb1.eventlog('third database')
let msg = new Buffer(1024)
msg.fill('a')
const hash = await db.add(msg.toString())
assert.notEqual(hash, null)
assert.equal(hash.startsWith('Qm'), true)
assert.equal(hash.length, 46)
})
})
describe('Iterator', function() {
let items = []
const itemCount = 5
    before(async () => {
      config.daemon1.repo = ipfsPath
      rmrf.sync(config.daemon1.repo)
      rmrf.sync(dbPath)
      ipfsd = await startIpfs(API, config.daemon1)
      ipfs = ipfsd.api
      orbitdb1 = new OrbitDB(ipfs, dbPath + '/1')
    })

    after(async () => {
      if(orbitdb1)
        await orbitdb1.stop()

      if (ipfsd)
        await stopIpfs(ipfsd)
    })

    describe('Eventlog', function () {
      it('creates and opens a database', async () => {
        db = await orbitdb1.eventlog('log database')
        assert.notEqual(db, null)
        assert.equal(db.type, 'eventlog')
        assert.equal(db.dbname, 'log database')
      })
it('returns an item with the correct structure', () => { it('returns 0 items when it\'s a fresh database', async () => {
const iter = db.iterator() db = await orbitdb1.eventlog('log database')
const next = iter.next().value const items = db.iterator({ limit: -1 }).collect()
assert.notEqual(next, null) assert.equal(items.length, 0)
assert.equal(next.hash.startsWith('Qm'), true)
assert.equal(next.payload.key, null)
assert.equal(next.payload.value, 'hello4')
}) })
it('implements Iterator interface', () => { it('returns the added entry\'s hash, 1 entry', async () => {
const iter = db.iterator({ limit: -1 }) db = await orbitdb1.eventlog('first database')
let messages = [] const hash = await db.add('hello1')
const items = db.iterator({ limit: -1 }).collect()
for(let i of iter) assert.notEqual(hash, null)
messages.push(i.key) assert.equal(hash, last(items).hash)
assert.equal(items.length, 1)
assert.equal(messages.length, items.length)
}) })
it('returns 1 item as default', () => { it('returns the added entry\'s hash, 2 entries', async () => {
const iter = db.iterator() db = await orbitdb1.eventlog('first database')
const first = iter.next().value await db.load()
const second = iter.next().value const prevHash = db.iterator().collect()[0].hash
assert.equal(first.hash, items[items.length - 1]) const hash = await db.add('hello2')
assert.equal(second, null) const items = db.iterator({ limit: -1 }).collect()
assert.equal(first.payload.value, 'hello4') assert.equal(items.length, 2)
assert.notEqual(hash, null)
assert.notEqual(hash, prevHash)
assert.equal(hash, last(items).hash)
}) })
it('returns items in the correct order', () => { it('adds five items', async () => {
const amount = 3 db = await orbitdb1.eventlog('second database')
const iter = db.iterator({ limit: amount }) await mapSeries([1, 2, 3, 4, 5], (i) => db.add('hello' + i))
let i = items.length - amount const items = db.iterator({ limit: -1 }).collect()
for(let item of iter) { assert.equal(items.length, 5)
assert.equal(item.payload.value, 'hello' + i) assert.equal(items[0].payload.value, 'hello1')
i ++ assert.equal(last(items.map((f) => f.payload.value)), 'hello5')
} })
it('adds an item that is > 256 bytes', async () => {
db = await orbitdb1.eventlog('third database')
let msg = new Buffer(1024)
msg.fill('a')
const hash = await db.add(msg.toString())
assert.notEqual(hash, null)
assert.equal(hash.startsWith('Qm'), true)
assert.equal(hash.length, 46)
}) })
}) })
describe('Collect', function() { describe('Iterator', function() {
it('returns all items', () => { let items = []
const messages = db.iterator({ limit: -1 }).collect() const itemCount = 5
assert.equal(messages.length, items.length)
assert.equal(messages[0].payload.value, 'hello0') before(async () => {
assert.equal(messages[messages.length - 1].payload.value, 'hello4') items = []
db = await orbitdb1.eventlog('iterator tests')
items = await mapSeries([0, 1, 2, 3, 4], (i) => db.add('hello' + i))
}) })
it('returns 1 item', () => { describe('Defaults', function() {
const messages = db.iterator().collect() it('returns an iterator', () => {
assert.equal(messages.length, 1) const iter = db.iterator()
const next = iter.next().value
assert.notEqual(iter, null)
assert.notEqual(next, null)
})
it('returns an item with the correct structure', () => {
const iter = db.iterator()
const next = iter.next().value
assert.notEqual(next, null)
assert.equal(next.hash.startsWith('Qm'), true)
assert.equal(next.payload.key, null)
assert.equal(next.payload.value, 'hello4')
})
it('implements Iterator interface', () => {
const iter = db.iterator({ limit: -1 })
let messages = []
for(let i of iter)
messages.push(i.key)
assert.equal(messages.length, items.length)
})
it('returns 1 item as default', () => {
const iter = db.iterator()
const first = iter.next().value
const second = iter.next().value
assert.equal(first.hash, items[items.length - 1])
assert.equal(second, null)
assert.equal(first.payload.value, 'hello4')
})
it('returns items in the correct order', () => {
const amount = 3
const iter = db.iterator({ limit: amount })
let i = items.length - amount
for(let item of iter) {
assert.equal(item.payload.value, 'hello' + i)
i ++
}
})
}) })
it('returns 3 items', () => { describe('Collect', function() {
const messages = db.iterator({ limit: 3 }).collect() it('returns all items', () => {
assert.equal(messages.length, 3) const messages = db.iterator({ limit: -1 }).collect()
}) assert.equal(messages.length, items.length)
}) assert.equal(messages[0].payload.value, 'hello0')
assert.equal(messages[messages.length - 1].payload.value, 'hello4')
describe('Options: limit', function() { })
it('returns 1 item when limit is 0', () => {
const iter = db.iterator({ limit: 1 })
const first = iter.next().value
const second = iter.next().value
assert.equal(first.hash, last(items))
assert.equal(second, null)
})
it('returns 1 item when limit is 1', () => {
const iter = db.iterator({ limit: 1 })
const first = iter.next().value
const second = iter.next().value
assert.equal(first.hash, last(items))
assert.equal(second, null)
})
it('returns 3 items', () => {
const iter = db.iterator({ limit: 3 })
const first = iter.next().value
const second = iter.next().value
const third = iter.next().value
const fourth = iter.next().value
assert.equal(first.hash, items[items.length - 3])
assert.equal(second.hash, items[items.length - 2])
assert.equal(third.hash, items[items.length - 1])
assert.equal(fourth, null)
})
it('returns all items', () => {
const messages = db.iterator({ limit: -1 })
.collect()
.map((e) => e.hash)
messages.reverse()
assert.equal(messages.length, items.length)
assert.equal(messages[0], items[items.length - 1])
})
it('returns all items when limit is bigger than -1', () => {
const messages = db.iterator({ limit: -300 })
.collect()
.map((e) => e.hash)
assert.equal(messages.length, items.length)
assert.equal(messages[0], items[0])
})
it('returns all items when limit is bigger than number of items', () => {
const messages = db.iterator({ limit: 300 })
.collect()
.map((e) => e.hash)
assert.equal(messages.length, items.length)
assert.equal(messages[0], items[0])
})
})
describe('Option: ranges', function() {
describe('gt & gte', function() {
it('returns 1 item when gte is the head', () => {
const messages = db.iterator({ gte: last(items), limit: -1 })
.collect()
.map((e) => e.hash)
it('returns 1 item', () => {
const messages = db.iterator().collect()
assert.equal(messages.length, 1) assert.equal(messages.length, 1)
assert.equal(messages[0], last(items))
}) })
it('returns 0 items when gt is the head', () => { it('returns 3 items', () => {
const messages = db.iterator({ gt: last(items) }).collect() const messages = db.iterator({ limit: 3 }).collect()
assert.equal(messages.length, 0) assert.equal(messages.length, 3)
})
})
describe('Options: limit', function() {
it('returns 1 item when limit is 0', () => {
const iter = db.iterator({ limit: 1 })
const first = iter.next().value
const second = iter.next().value
assert.equal(first.hash, last(items))
assert.equal(second, null)
}) })
it('returns 2 item when gte is defined', () => { it('returns 1 item when limit is 1', () => {
const gte = items[items.length - 2] const iter = db.iterator({ limit: 1 })
const messages = db.iterator({ gte: gte, limit: -1 }) const first = iter.next().value
const second = iter.next().value
assert.equal(first.hash, last(items))
assert.equal(second, null)
})
it('returns 3 items', () => {
const iter = db.iterator({ limit: 3 })
const first = iter.next().value
const second = iter.next().value
const third = iter.next().value
const fourth = iter.next().value
assert.equal(first.hash, items[items.length - 3])
assert.equal(second.hash, items[items.length - 2])
assert.equal(third.hash, items[items.length - 1])
assert.equal(fourth, null)
})
it('returns all items', () => {
const messages = db.iterator({ limit: -1 })
.collect() .collect()
.map((e) => e.hash) .map((e) => e.hash)
assert.equal(messages.length, 2) messages.reverse()
assert.equal(messages[0], items[items.length - 2]) assert.equal(messages.length, items.length)
assert.equal(messages[1], items[items.length - 1]) assert.equal(messages[0], items[items.length - 1])
}) })
it('returns all items when gte is the root item', () => { it('returns all items when limit is bigger than -1', () => {
const messages = db.iterator({ gte: items[0], limit: -1 }) const messages = db.iterator({ limit: -300 })
.collect() .collect()
.map((e) => e.hash) .map((e) => e.hash)
assert.equal(messages.length, items.length) assert.equal(messages.length, items.length)
assert.equal(messages[0], items[0]) assert.equal(messages[0], items[0])
assert.equal(messages[messages.length - 1], last(items))
}) })
it('returns items when gt is the root item', () => { it('returns all items when limit is bigger than number of items', () => {
const messages = db.iterator({ gt: items[0], limit: -1 }) const messages = db.iterator({ limit: 300 })
.collect() .collect()
.map((e) => e.hash) .map((e) => e.hash)
assert.equal(messages.length, itemCount - 1) assert.equal(messages.length, items.length)
assert.equal(messages[0], items[1]) assert.equal(messages[0], items[0])
assert.equal(messages[3], last(items))
})
it('returns items when gt is defined', () => {
const messages = db.iterator({ limit: -1})
.collect()
.map((e) => e.hash)
const gt = messages[2]
const messages2 = db.iterator({ gt: gt, limit: 100 })
.collect()
.map((e) => e.hash)
assert.equal(messages2.length, 2)
assert.equal(messages2[0], messages[messages.length - 2])
assert.equal(messages2[1], messages[messages.length - 1])
}) })
}) })
describe('lt & lte', function() { describe('Option: ranges', function() {
it('returns one item after head when lt is the head', () => { describe('gt & gte', function() {
const messages = db.iterator({ lt: last(items) }) it('returns 1 item when gte is the head', () => {
.collect() const messages = db.iterator({ gte: last(items), limit: -1 })
.map((e) => e.hash) .collect()
.map((e) => e.hash)
assert.equal(messages.length, 1) assert.equal(messages.length, 1)
assert.equal(messages[0], items[items.length - 2]) assert.equal(messages[0], last(items))
})
it('returns 0 items when gt is the head', () => {
const messages = db.iterator({ gt: last(items) }).collect()
assert.equal(messages.length, 0)
})
it('returns 2 item when gte is defined', () => {
const gte = items[items.length - 2]
const messages = db.iterator({ gte: gte, limit: -1 })
.collect()
.map((e) => e.hash)
assert.equal(messages.length, 2)
assert.equal(messages[0], items[items.length - 2])
assert.equal(messages[1], items[items.length - 1])
})
it('returns all items when gte is the root item', () => {
const messages = db.iterator({ gte: items[0], limit: -1 })
.collect()
.map((e) => e.hash)
assert.equal(messages.length, items.length)
assert.equal(messages[0], items[0])
assert.equal(messages[messages.length - 1], last(items))
})
it('returns items when gt is the root item', () => {
const messages = db.iterator({ gt: items[0], limit: -1 })
.collect()
.map((e) => e.hash)
assert.equal(messages.length, itemCount - 1)
assert.equal(messages[0], items[1])
assert.equal(messages[3], last(items))
})
it('returns items when gt is defined', () => {
const messages = db.iterator({ limit: -1})
.collect()
.map((e) => e.hash)
const gt = messages[2]
const messages2 = db.iterator({ gt: gt, limit: 100 })
.collect()
.map((e) => e.hash)
assert.equal(messages2.length, 2)
assert.equal(messages2[0], messages[messages.length - 2])
assert.equal(messages2[1], messages[messages.length - 1])
})
}) })
it('returns all items when lt is head and limit is -1', () => { describe('lt & lte', function() {
const messages = db.iterator({ lt: last(items), limit: -1 }) it('returns one item after head when lt is the head', () => {
.collect() const messages = db.iterator({ lt: last(items) })
.map((e) => e.hash) .collect()
.map((e) => e.hash)
assert.equal(messages.length, items.length - 1) assert.equal(messages.length, 1)
assert.equal(messages[0], items[0]) assert.equal(messages[0], items[items.length - 2])
assert.equal(messages[messages.length - 1], items[items.length - 2]) })
})
it('returns 3 items when lt is head and limit is 3', () => { it('returns all items when lt is head and limit is -1', () => {
const messages = db.iterator({ lt: last(items), limit: 3 }) const messages = db.iterator({ lt: last(items), limit: -1 })
.collect() .collect()
.map((e) => e.hash) .map((e) => e.hash)
assert.equal(messages.length, 3) assert.equal(messages.length, items.length - 1)
assert.equal(messages[0], items[items.length - 4]) assert.equal(messages[0], items[0])
assert.equal(messages[2], items[items.length - 2]) assert.equal(messages[messages.length - 1], items[items.length - 2])
}) })
it('returns null when lt is the root item', () => { it('returns 3 items when lt is head and limit is 3', () => {
const messages = db.iterator({ lt: items[0] }).collect() const messages = db.iterator({ lt: last(items), limit: 3 })
assert.equal(messages.length, 0) .collect()
}) .map((e) => e.hash)
it('returns one item when lte is the root item', () => { assert.equal(messages.length, 3)
const messages = db.iterator({ lte: items[0] }) assert.equal(messages[0], items[items.length - 4])
.collect() assert.equal(messages[2], items[items.length - 2])
.map((e) => e.hash) })
assert.equal(messages.length, 1) it('returns null when lt is the root item', () => {
assert.equal(messages[0], items[0]) const messages = db.iterator({ lt: items[0] }).collect()
}) assert.equal(messages.length, 0)
})
it('returns all items when lte is the head', () => { it('returns one item when lte is the root item', () => {
const messages = db.iterator({ lte: last(items), limit: -1 }) const messages = db.iterator({ lte: items[0] })
.collect() .collect()
.map((e) => e.hash) .map((e) => e.hash)
assert.equal(messages.length, itemCount) assert.equal(messages.length, 1)
assert.equal(messages[0], items[0]) assert.equal(messages[0], items[0])
assert.equal(messages[4], last(items)) })
})
it('returns 3 items when lte is the head', () => { it('returns all items when lte is the head', () => {
const messages = db.iterator({ lte: last(items), limit: 3 }) const messages = db.iterator({ lte: last(items), limit: -1 })
.collect() .collect()
.map((e) => e.hash) .map((e) => e.hash)
assert.equal(messages.length, 3) assert.equal(messages.length, itemCount)
assert.equal(messages[0], items[items.length - 3]) assert.equal(messages[0], items[0])
assert.equal(messages[1], items[items.length - 2]) assert.equal(messages[4], last(items))
assert.equal(messages[2], last(items)) })
it('returns 3 items when lte is the head', () => {
const messages = db.iterator({ lte: last(items), limit: 3 })
.collect()
.map((e) => e.hash)
assert.equal(messages.length, 3)
assert.equal(messages[0], items[items.length - 3])
assert.equal(messages[1], items[items.length - 2])
assert.equal(messages[2], last(items))
})
}) })
}) })
}) })
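Each suite is now parameterised over testAPIs, so the same assertions run once per IPFS implementation. The contents of test/utils/test-apis.js are not shown anywhere in this diff; the following is only an illustrative guess at its shape, and the keys and options are placeholders.

// Hypothetical sketch of test/utils/test-apis.js (keys and options are guesses)
'use strict'

module.exports = {
  'js-ipfs': { type: 'proc' }, // in-process js-ipfs node
  'go-ipfs': { type: 'go' },   // go-ipfs daemon spawned through ipfsd-ctl
}

Whatever the real shape is, the suites only rely on Object.keys(testAPIs) to produce the API label that gets passed to startIpfs(API, ...).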


@@ -1,388 +1,403 @@
'use strict'

const assert = require('assert')
const mapSeries = require('p-map-series')
const rmrf = require('rimraf')
const OrbitDB = require('../src/OrbitDB')

// Include test utilities
const {
  config,
  startIpfs,
  stopIpfs,
  testAPIs,
} = require('./utils')

const last = arr => arr[arr.length - 1]

const dbPath = './orbitdb/tests/feed'
const ipfsPath = './orbitdb/tests/feed/ipfs'

Object.keys(testAPIs).forEach(API => {
  describe(`orbit-db - Feed Database (${API})`, function() {
    this.timeout(config.timeout)

    let ipfsd, ipfs, orbitdb1, db, address
before(async () => {
config.daemon1.repo = ipfsPath
rmrf.sync(config.daemon1.repo)
rmrf.sync(dbPath)
ipfs = await startIpfs(config.daemon1)
orbitdb1 = new OrbitDB(ipfs, dbPath + '/1')
})
after(async () => {
if(orbitdb1)
await orbitdb1.stop()
if (ipfs)
await ipfs.stop()
})
describe('Feed', function() {
it('creates and opens a database', async () => {
db = await orbitdb1.feed('first database')
db = await orbitdb1.feed('first database')
const items = db.iterator({ limit: -1 }).collect()
assert.equal(items.length, 0)
})
it('returns the added entry\'s hash, 1 entry', async () => {
db = await orbitdb1.feed('first')
address = db.address.toString()
const hash = await db.add('hello1')
const items = db.iterator({ limit: -1 }).collect()
assert.notEqual(hash, null)
assert.equal(hash, last(items).hash)
assert.equal(items.length, 1)
})
it('returns the added entry\'s hash, 2 entries', async () => {
db = await orbitdb1.feed(address)
await db.load()
const prevHash = db.iterator().collect()[0].hash
const hash = await db.add('hello2')
const items = db.iterator({ limit: -1 }).collect()
assert.equal(items.length, 2)
assert.notEqual(hash, null)
assert.notEqual(hash, prevHash)
assert.equal(hash, last(items).hash)
})
it('adds five items', async () => {
db = await orbitdb1.feed('second')
await mapSeries([1, 2, 3, 4, 5], (i) => db.add('hello' + i))
const items = db.iterator({ limit: -1 }).collect()
assert.equal(items.length, 5)
assert.equal(items[0].payload.value, 'hello1')
assert.equal(items[items.length - 1].payload.value, 'hello5')
})
it('adds an item that is > 256 bytes', async () => {
db = await orbitdb1.feed('third')
let msg = new Buffer(1024)
msg.fill('a')
const hash = await db.add(msg.toString())
assert.notEqual(hash, null)
assert.equal(hash.startsWith('Qm'), true)
assert.equal(hash.length, 46)
})
it('deletes an item when only one item in the database', async () => {
db = await orbitdb1.feed('fourth')
const hash = await db.add('hello3')
const delopHash = await db.remove(hash)
const items = db.iterator().collect()
assert.equal(delopHash.startsWith('Qm'), true)
assert.equal(items.length, 0)
})
it('deletes an item when two items in the database', async () => {
db = await orbitdb1.feed('fifth')
await db.add('hello1')
const hash = await db.add('hello2')
await db.remove(hash)
const items = db.iterator({ limit: -1 }).collect()
assert.equal(items.length, 1)
assert.equal(items[0].payload.value, 'hello1')
})
it('deletes an item between adds', async () => {
db = await orbitdb1.feed('sixth')
const hash = await db.add('hello1')
await db.add('hello2')
await db.remove(hash)
await db.add('hello3')
const items = db.iterator({ limit: -1 }).collect()
assert.equal(items.length, 2)
const firstItem = items[0]
const secondItem = items[1]
assert.equal(firstItem.hash.startsWith('Qm'), true)
assert.equal(firstItem.payload.key, null)
assert.equal(firstItem.payload.value, 'hello2')
assert.equal(secondItem.payload.value, 'hello3')
})
})
describe('Iterator', function() {
let items = []
const itemCount = 5
    before(async () => {
      config.daemon1.repo = ipfsPath
      rmrf.sync(config.daemon1.repo)
      rmrf.sync(dbPath)
      ipfsd = await startIpfs(API, config.daemon1)
      ipfs = ipfsd.api
      orbitdb1 = new OrbitDB(ipfs, dbPath + '/1')
    })

    after(async () => {
      if(orbitdb1)
        await orbitdb1.stop()

      if (ipfsd)
        await stopIpfs(ipfsd)
    })

    describe('Feed', function() {
      it('creates and opens a database', async () => {
        db = await orbitdb1.feed('feed database')
        assert.notEqual(db, null)
        assert.equal(db.type, 'feed')
        assert.equal(db.dbname, 'feed database')
      })
it('returns an item with the correct structure', () => { it('returns 0 items when it\'s a fresh database', async () => {
const iter = db.iterator() db = await orbitdb1.feed('feed database')
const next = iter.next().value const items = db.iterator({ limit: -1 }).collect()
assert.notEqual(next, null) assert.equal(items.length, 0)
assert.equal(next.hash.startsWith('Qm'), true)
assert.equal(next.payload.key, null)
assert.equal(next.payload.value, 'hello4')
}) })
it('implements Iterator interface', () => { it('returns the added entry\'s hash, 1 entry', async () => {
const iter = db.iterator({ limit: -1 }) db = await orbitdb1.feed('first')
let messages = [] address = db.address.toString()
const hash = await db.add('hello1')
for(let i of iter) const items = db.iterator({ limit: -1 }).collect()
messages.push(i.key) assert.notEqual(hash, null)
assert.equal(hash, last(items).hash)
assert.equal(messages.length, items.length) assert.equal(items.length, 1)
}) })
it('returns 1 item as default', () => { it('returns the added entry\'s hash, 2 entries', async () => {
const iter = db.iterator() db = await orbitdb1.feed(address)
const first = iter.next().value await db.load()
const second = iter.next().value const prevHash = db.iterator().collect()[0].hash
assert.equal(first.hash, items[items.length - 1]) const hash = await db.add('hello2')
assert.equal(second, null) const items = db.iterator({ limit: -1 }).collect()
assert.equal(first.payload.value, 'hello4') assert.equal(items.length, 2)
assert.notEqual(hash, null)
assert.notEqual(hash, prevHash)
assert.equal(hash, last(items).hash)
}) })
it('returns items in the correct order', () => { it('adds five items', async () => {
const amount = 3 db = await orbitdb1.feed('second')
const iter = db.iterator({ limit: amount }) await mapSeries([1, 2, 3, 4, 5], (i) => db.add('hello' + i))
let i = items.length - amount const items = db.iterator({ limit: -1 }).collect()
for(let item of iter) { assert.equal(items.length, 5)
assert.equal(item.payload.value, 'hello' + i) assert.equal(items[0].payload.value, 'hello1')
i ++ assert.equal(items[items.length - 1].payload.value, 'hello5')
} })
it('adds an item that is > 256 bytes', async () => {
db = await orbitdb1.feed('third')
let msg = new Buffer(1024)
msg.fill('a')
const hash = await db.add(msg.toString())
assert.notEqual(hash, null)
assert.equal(hash.startsWith('Qm'), true)
assert.equal(hash.length, 46)
})
it('deletes an item when only one item in the database', async () => {
db = await orbitdb1.feed('fourth')
const hash = await db.add('hello3')
const delopHash = await db.remove(hash)
const items = db.iterator().collect()
assert.equal(delopHash.startsWith('Qm'), true)
assert.equal(items.length, 0)
})
it('deletes an item when two items in the database', async () => {
db = await orbitdb1.feed('fifth')
await db.add('hello1')
const hash = await db.add('hello2')
await db.remove(hash)
const items = db.iterator({ limit: -1 }).collect()
assert.equal(items.length, 1)
assert.equal(items[0].payload.value, 'hello1')
})
it('deletes an item between adds', async () => {
db = await orbitdb1.feed('sixth')
const hash = await db.add('hello1')
await db.add('hello2')
await db.remove(hash)
await db.add('hello3')
const items = db.iterator({ limit: -1 }).collect()
assert.equal(items.length, 2)
const firstItem = items[0]
const secondItem = items[1]
assert.equal(firstItem.hash.startsWith('Qm'), true)
assert.equal(firstItem.payload.key, null)
assert.equal(firstItem.payload.value, 'hello2')
assert.equal(secondItem.payload.value, 'hello3')
}) })
}) })
describe('Collect', function() { describe('Iterator', function() {
it('returns all items', () => { let items = []
const messages = db.iterator({ limit: -1 }).collect() const itemCount = 5
assert.equal(messages.length, items.length)
assert.equal(messages[0].payload.value, 'hello0') before(async () => {
assert.equal(messages[messages.length - 1].payload.value, 'hello4') items = []
db = await orbitdb1.feed('feed-iterator')
items = await mapSeries([0, 1, 2, 3, 4], (i) => db.add('hello' + i))
}) })
it('returns 1 item', () => { describe('Defaults', function() {
const messages = db.iterator().collect() it('returns an iterator', () => {
assert.equal(messages.length, 1) const iter = db.iterator()
const next = iter.next().value
assert.notEqual(iter, null)
assert.notEqual(next, null)
})
it('returns an item with the correct structure', () => {
const iter = db.iterator()
const next = iter.next().value
assert.notEqual(next, null)
assert.equal(next.hash.startsWith('Qm'), true)
assert.equal(next.payload.key, null)
assert.equal(next.payload.value, 'hello4')
})
it('implements Iterator interface', () => {
const iter = db.iterator({ limit: -1 })
let messages = []
for(let i of iter)
messages.push(i.key)
assert.equal(messages.length, items.length)
})
it('returns 1 item as default', () => {
const iter = db.iterator()
const first = iter.next().value
const second = iter.next().value
assert.equal(first.hash, items[items.length - 1])
assert.equal(second, null)
assert.equal(first.payload.value, 'hello4')
})
it('returns items in the correct order', () => {
const amount = 3
const iter = db.iterator({ limit: amount })
let i = items.length - amount
for(let item of iter) {
assert.equal(item.payload.value, 'hello' + i)
i ++
}
})
}) })
it('returns 3 items', () => { describe('Collect', function() {
const messages = db.iterator({ limit: 3 }).collect() it('returns all items', () => {
assert.equal(messages.length, 3) const messages = db.iterator({ limit: -1 }).collect()
}) assert.equal(messages.length, items.length)
}) assert.equal(messages[0].payload.value, 'hello0')
assert.equal(messages[messages.length - 1].payload.value, 'hello4')
describe('Options: limit', function() { })
it('returns 1 item when limit is 0', () => {
const iter = db.iterator({ limit: 1 })
const first = iter.next().value
const second = iter.next().value
assert.equal(first.hash, last(items))
assert.equal(second, null)
})
it('returns 1 item when limit is 1', () => {
const iter = db.iterator({ limit: 1 })
const first = iter.next().value
const second = iter.next().value
assert.equal(first.hash, last(items))
assert.equal(second, null)
})
it('returns 3 items', () => {
const iter = db.iterator({ limit: 3 })
const first = iter.next().value
const second = iter.next().value
const third = iter.next().value
const fourth = iter.next().value
assert.equal(first.hash, items[items.length - 3])
assert.equal(second.hash, items[items.length - 2])
assert.equal(third.hash, items[items.length - 1])
assert.equal(fourth, null)
})
it('returns all items', () => {
const messages = db.iterator({ limit: -1 })
.collect()
.map((e) => e.hash)
messages.reverse()
assert.equal(messages.length, items.length)
assert.equal(messages[0], items[items.length - 1])
})
it('returns all items when limit is bigger than -1', () => {
const messages = db.iterator({ limit: -300 })
.collect()
.map((e) => e.hash)
assert.equal(messages.length, items.length)
assert.equal(messages[0], items[0])
})
it('returns all items when limit is bigger than number of items', () => {
const messages = db.iterator({ limit: 300 })
.collect()
.map((e) => e.hash)
assert.equal(messages.length, items.length)
assert.equal(messages[0], items[0])
})
})
describe('Option: ranges', function() {
describe('gt & gte', function() {
it('returns 1 item when gte is the head', () => {
const messages = db.iterator({ gte: last(items), limit: -1 })
.collect()
.map((e) => e.hash)
it('returns 1 item', () => {
const messages = db.iterator().collect()
assert.equal(messages.length, 1) assert.equal(messages.length, 1)
assert.equal(messages[0], last(items))
}) })
it('returns 0 items when gt is the head', () => { it('returns 3 items', () => {
const messages = db.iterator({ gt: last(items) }).collect() const messages = db.iterator({ limit: 3 }).collect()
assert.equal(messages.length, 0) assert.equal(messages.length, 3)
})
})
describe('Options: limit', function() {
it('returns 1 item when limit is 0', () => {
const iter = db.iterator({ limit: 1 })
const first = iter.next().value
const second = iter.next().value
assert.equal(first.hash, last(items))
assert.equal(second, null)
}) })
it('returns 2 item when gte is defined', () => { it('returns 1 item when limit is 1', () => {
const gte = items[items.length - 2] const iter = db.iterator({ limit: 1 })
const messages = db.iterator({ gte: gte, limit: -1 }) const first = iter.next().value
const second = iter.next().value
assert.equal(first.hash, last(items))
assert.equal(second, null)
})
it('returns 3 items', () => {
const iter = db.iterator({ limit: 3 })
const first = iter.next().value
const second = iter.next().value
const third = iter.next().value
const fourth = iter.next().value
assert.equal(first.hash, items[items.length - 3])
assert.equal(second.hash, items[items.length - 2])
assert.equal(third.hash, items[items.length - 1])
assert.equal(fourth, null)
})
it('returns all items', () => {
const messages = db.iterator({ limit: -1 })
.collect() .collect()
.map((e) => e.hash) .map((e) => e.hash)
assert.equal(messages.length, 2) messages.reverse()
assert.equal(messages[0], items[items.length - 2]) assert.equal(messages.length, items.length)
assert.equal(messages[1], items[items.length - 1]) assert.equal(messages[0], items[items.length - 1])
}) })
it('returns all items when gte is the root item', () => { it('returns all items when limit is bigger than -1', () => {
const messages = db.iterator({ gte: items[0], limit: -1 }) const messages = db.iterator({ limit: -300 })
.collect() .collect()
.map((e) => e.hash) .map((e) => e.hash)
assert.equal(messages.length, items.length) assert.equal(messages.length, items.length)
assert.equal(messages[0], items[0]) assert.equal(messages[0], items[0])
assert.equal(messages[messages.length - 1], last(items))
}) })
it('returns items when gt is the root item', () => { it('returns all items when limit is bigger than number of items', () => {
const messages = db.iterator({ gt: items[0], limit: -1 }) const messages = db.iterator({ limit: 300 })
.collect() .collect()
.map((e) => e.hash) .map((e) => e.hash)
assert.equal(messages.length, itemCount - 1) assert.equal(messages.length, items.length)
assert.equal(messages[0], items[1]) assert.equal(messages[0], items[0])
assert.equal(messages[3], last(items))
})
it('returns items when gt is defined', () => {
const messages = db.iterator({ limit: -1})
.collect()
.map((e) => e.hash)
const gt = messages[2]
const messages2 = db.iterator({ gt: gt, limit: 100 })
.collect()
.map((e) => e.hash)
assert.equal(messages2.length, 2)
assert.equal(messages2[0], messages[messages.length - 2])
assert.equal(messages2[1], messages[messages.length - 1])
}) })
}) })
describe('lt & lte', function() { describe('Option: ranges', function() {
it('returns one item after head when lt is the head', () => { describe('gt & gte', function() {
const messages = db.iterator({ lt: last(items) }) it('returns 1 item when gte is the head', () => {
.collect() const messages = db.iterator({ gte: last(items), limit: -1 })
.map((e) => e.hash) .collect()
.map((e) => e.hash)
assert.equal(messages.length, 1) assert.equal(messages.length, 1)
assert.equal(messages[0], items[items.length - 2]) assert.equal(messages[0], last(items))
})
it('returns 0 items when gt is the head', () => {
const messages = db.iterator({ gt: last(items) }).collect()
assert.equal(messages.length, 0)
})
it('returns 2 item when gte is defined', () => {
const gte = items[items.length - 2]
const messages = db.iterator({ gte: gte, limit: -1 })
.collect()
.map((e) => e.hash)
assert.equal(messages.length, 2)
assert.equal(messages[0], items[items.length - 2])
assert.equal(messages[1], items[items.length - 1])
})
it('returns all items when gte is the root item', () => {
const messages = db.iterator({ gte: items[0], limit: -1 })
.collect()
.map((e) => e.hash)
assert.equal(messages.length, items.length)
assert.equal(messages[0], items[0])
assert.equal(messages[messages.length - 1], last(items))
})
it('returns items when gt is the root item', () => {
const messages = db.iterator({ gt: items[0], limit: -1 })
.collect()
.map((e) => e.hash)
assert.equal(messages.length, itemCount - 1)
assert.equal(messages[0], items[1])
assert.equal(messages[3], last(items))
})
it('returns items when gt is defined', () => {
const messages = db.iterator({ limit: -1})
.collect()
.map((e) => e.hash)
const gt = messages[2]
const messages2 = db.iterator({ gt: gt, limit: 100 })
.collect()
.map((e) => e.hash)
assert.equal(messages2.length, 2)
assert.equal(messages2[0], messages[messages.length - 2])
assert.equal(messages2[1], messages[messages.length - 1])
})
}) })
it('returns all items when lt is head and limit is -1', () => { describe('lt & lte', function() {
const messages = db.iterator({ lt: last(items), limit: -1 }) it('returns one item after head when lt is the head', () => {
.collect() const messages = db.iterator({ lt: last(items) })
.map((e) => e.hash) .collect()
.map((e) => e.hash)
assert.equal(messages.length, items.length - 1) assert.equal(messages.length, 1)
assert.equal(messages[0], items[0]) assert.equal(messages[0], items[items.length - 2])
assert.equal(messages[messages.length - 1], items[items.length - 2]) })
})
it('returns 3 items when lt is head and limit is 3', () => { it('returns all items when lt is head and limit is -1', () => {
const messages = db.iterator({ lt: last(items), limit: 3 }) const messages = db.iterator({ lt: last(items), limit: -1 })
.collect() .collect()
.map((e) => e.hash) .map((e) => e.hash)
assert.equal(messages.length, 3) assert.equal(messages.length, items.length - 1)
assert.equal(messages[0], items[items.length - 4]) assert.equal(messages[0], items[0])
assert.equal(messages[2], items[items.length - 2]) assert.equal(messages[messages.length - 1], items[items.length - 2])
}) })
it('returns null when lt is the root item', () => { it('returns 3 items when lt is head and limit is 3', () => {
const messages = db.iterator({ lt: items[0] }).collect() const messages = db.iterator({ lt: last(items), limit: 3 })
assert.equal(messages.length, 0) .collect()
}) .map((e) => e.hash)
it('returns one item when lte is the root item', () => { assert.equal(messages.length, 3)
const messages = db.iterator({ lte: items[0] }) assert.equal(messages[0], items[items.length - 4])
.collect() assert.equal(messages[2], items[items.length - 2])
.map((e) => e.hash) })
assert.equal(messages.length, 1) it('returns null when lt is the root item', () => {
assert.equal(messages[0], items[0]) const messages = db.iterator({ lt: items[0] }).collect()
}) assert.equal(messages.length, 0)
})
it('returns all items when lte is the head', () => { it('returns one item when lte is the root item', () => {
const messages = db.iterator({ lte: last(items), limit: -1 }) const messages = db.iterator({ lte: items[0] })
.collect() .collect()
.map((e) => e.hash) .map((e) => e.hash)
assert.equal(messages.length, itemCount) assert.equal(messages.length, 1)
assert.equal(messages[0], items[0]) assert.equal(messages[0], items[0])
assert.equal(messages[4], last(items)) })
})
it('returns 3 items when lte is the head', () => { it('returns all items when lte is the head', () => {
const messages = db.iterator({ lte: last(items), limit: 3 }) const messages = db.iterator({ lte: last(items), limit: -1 })
.collect() .collect()
.map((e) => e.hash) .map((e) => e.hash)
assert.equal(messages.length, 3) assert.equal(messages.length, itemCount)
assert.equal(messages[0], items[items.length - 3]) assert.equal(messages[0], items[0])
assert.equal(messages[1], items[items.length - 2]) assert.equal(messages[4], last(items))
assert.equal(messages[2], last(items)) })
it('returns 3 items when lte is the head', () => {
const messages = db.iterator({ lte: last(items), limit: 3 })
.collect()
.map((e) => e.hash)
assert.equal(messages.length, 3)
assert.equal(messages[0], items[items.length - 3])
assert.equal(messages[1], items[items.length - 2])
assert.equal(messages[2], last(items))
})
}) })
}) })
}) })
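The large-entry tests above build their payload with the deprecated new Buffer(1024) constructor. On Node.js versions that warn about it, the equivalent non-deprecated form of the same two-step setup would be:

let msg = Buffer.alloc(1024, 'a')         // same result as new Buffer(1024) followed by msg.fill('a')
const hash = await db.add(msg.toString())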


@@ -3,117 +3,128 @@
const assert = require('assert')
const rmrf = require('rimraf')
const OrbitDB = require('../src/OrbitDB')

// Include test utilities
const {
  config,
  startIpfs,
  stopIpfs,
  testAPIs,
} = require('./utils')

const dbPath = './orbitdb/tests/kvstore'
const ipfsPath = './orbitdb/tests/kvstore/ipfs'

Object.keys(testAPIs).forEach(API => {
  describe(`orbit-db - Key-Value Database (${API})`, function() {
    this.timeout(config.timeout)

    let ipfsd, ipfs, orbitdb1, db

    before(async () => {
      config.daemon1.repo = ipfsPath
      rmrf.sync(config.daemon1.repo)
      rmrf.sync(dbPath)
      ipfsd = await startIpfs(API, config.daemon1)
      ipfs = ipfsd.api
      orbitdb1 = new OrbitDB(ipfs, dbPath + '/1')
    })

    after(async () => {
      if(orbitdb1)
        await orbitdb1.stop()

      if (ipfsd)
        await stopIpfs(ipfsd)
    })

    beforeEach(async () => {
      db = await orbitdb1.kvstore(config.dbname, { path: dbPath })
    })

    afterEach(async () => {
      await db.drop()
    })

    it('creates and opens a database', async () => {
      db = await orbitdb1.keyvalue('first kv database')
      assert.notEqual(db, null)
      assert.equal(db.type, 'keyvalue')
      assert.equal(db.dbname, 'first kv database')
    })

    it('put', async () => {
      await db.put('key1', 'hello1')
      const value = db.get('key1')
      assert.equal(value, 'hello1')
    })

    it('get', async () => {
      await db.put('key1', 'hello2')
      const value = db.get('key1')
      assert.equal(value, 'hello2')
    })

    it('put updates a value', async () => {
      await db.put('key1', 'hello3')
      await db.put('key1', 'hello4')
      const value = db.get('key1')
      assert.equal(value, 'hello4')
    })

    it('set is an alias for put', async () => {
      await db.set('key1', 'hello5')
      const value = db.get('key1')
      assert.equal(value, 'hello5')
    })

    it('put/get - multiple keys', async () => {
      await db.put('key1', 'hello1')
      await db.put('key2', 'hello2')
      await db.put('key3', 'hello3')
      const v1 = db.get('key1')
      const v2 = db.get('key2')
      const v3 = db.get('key3')
      assert.equal(v1, 'hello1')
      assert.equal(v2, 'hello2')
      assert.equal(v3, 'hello3')
    })

    it('deletes a key', async () => {
      await db.put('key1', 'hello!')
      await db.del('key1')
      const value = db.get('key1')
      assert.equal(value, null)
    })

    it('deletes a key after multiple updates', async () => {
      await db.put('key1', 'hello1')
      await db.put('key1', 'hello2')
      await db.put('key1', 'hello3')
      await db.del('key1')
      const value = db.get('key1')
      assert.equal(value, null)
    })

    it('get - integer value', async () => {
      const val = 123
      await db.put('key1', val)
      const v1 = db.get('key1')
      assert.equal(v1, val)
    })

    it('get - object value', async () => {
      const val = { one: 'first', two: 2 }
      await db.put('key1', val)
      const v1 = db.get('key1')
      assert.deepEqual(v1, val)
    })

    it('get - array value', async () => {
      const val = [1, 2, 3, 4, 5]
      await db.put('key1', val)
      const v1 = db.get('key1')
      assert.deepEqual(v1, val)
    })
  })
})
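The kvstore suite, like the others, reads config.timeout, config.dbname and the per-daemon repo settings from test/utils/config.js, which is not included in this diff. A minimal sketch of the shape it appears to have, with all values being guesses:

// Hypothetical sketch of test/utils/config.js; only fields the suites reference are shown
'use strict'

module.exports = {
  timeout: 60000,
  dbname: 'orbit-db-tests',
  daemon1: { repo: './ipfs/orbitdb/tests/daemon1' }, // overridden per suite via config.daemon1.repo
  daemon2: { repo: './ipfs/orbitdb/tests/daemon2' },
}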


@@ -6,6 +6,9 @@ const rmrf = require('rimraf')
const OrbitDB = require('../src/OrbitDB')
const config = require('./utils/config')
const startIpfs = require('./utils/start-ipfs')
const stopIpfs = require('./utils/stop-ipfs')
const testAPIs = require('./utils/test-apis')
const connectPeers = require('./utils/connect-peers')
const waitForPeers = require('./utils/wait-for-peers')

const dbPath1 = './orbitdb/tests/multiple-databases/1'
@@ -49,139 +52,143 @@ const databaseInterfaces = [
  },
]

Object.keys(testAPIs).forEach(API => {
  describe(`orbit-db - Multiple Databases (${API})`, function() {
    this.timeout(config.timeout)

    let ipfsd1, ipfsd2, ipfs1, ipfs2
    let orbitdb1, orbitdb2, db1, db2, db3, db4

    let localDatabases = []
    let remoteDatabases = []

    // Create two IPFS instances and two OrbitDB instances (2 nodes/peers)
    before(async () => {
      config.daemon1.repo = ipfsPath1
      config.daemon2.repo = ipfsPath2
      rmrf.sync(config.daemon1.repo)
      rmrf.sync(config.daemon2.repo)
      rmrf.sync(dbPath1)
      rmrf.sync(dbPath2)
      ipfsd1 = await startIpfs(API, config.daemon1)
      ipfsd2 = await startIpfs(API, config.daemon2)
      ipfs1 = ipfsd1.api
      ipfs2 = ipfsd2.api
      // Connect the peers manually to speed up test times
      await connectPeers(ipfs1, ipfs2)
      orbitdb1 = new OrbitDB(ipfs1, dbPath1)
      orbitdb2 = new OrbitDB(ipfs2, dbPath2)
after(async () => {
if(orbitdb1)
await orbitdb1.stop()
if(orbitdb2)
await orbitdb2.stop()
if (ipfs1)
await ipfs1.stop()
if (ipfs2)
await ipfs2.stop()
})
beforeEach(async () => {
let options = {}
// Set write access for both clients
options.write = [
orbitdb1.key.getPublic('hex'),
orbitdb2.key.getPublic('hex')
],
console.log("Creating databases and waiting for peers to connect")
// Open the databases on the first node
options = Object.assign({}, options, { create: true })
// Open the databases on the first node
for (let dbInterface of databaseInterfaces) {
const db = await dbInterface.open(orbitdb1, dbInterface.name, options)
localDatabases.push(db)
}
// Open the databases on the second node, set 'sync' flag so that
// the second peer fetches the db manifest from the network
options = Object.assign({}, options, { sync: true })
for (let [index, dbInterface] of databaseInterfaces.entries()) {
const address = localDatabases[index].address.toString()
const db = await dbInterface.open(orbitdb2, address, options)
remoteDatabases.push(db)
}
// Wait for the peers to connect
await waitForPeers(ipfs1, [orbitdb2.id], localDatabases[0].address.toString())
await waitForPeers(ipfs1, [orbitdb2.id], localDatabases[0].address.toString())
console.log("Peers connected")
})
afterEach(async () => {
for (let db of remoteDatabases)
await db.drop()
for (let db of localDatabases)
await db.drop()
})
it('replicates multiple open databases', async () => {
const entryCount = 100
const entryArr = []
// Create an array that we use to create the db entries
for (let i = 1; i < entryCount + 1; i ++)
entryArr.push(i)
// Result state,
// we count how many times 'replicated' event was fired per db
let replicated = {}
localDatabases.forEach(db => {
replicated[db.address.toString()] = 0
}) })
// Listen for the updates from remote peers after(async () => {
remoteDatabases.forEach(db => { if(orbitdb1)
db.events.on('replicated', (address) => { await orbitdb1.stop()
replicated[address] += 1
if(orbitdb2)
await orbitdb2.stop()
if (ipfsd1)
await stopIpfs(ipfsd1)
if (ipfsd2)
await stopIpfs(ipfsd2)
})
beforeEach(async () => {
let options = {}
// Set write access for both clients
options.write = [
orbitdb1.key.getPublic('hex'),
orbitdb2.key.getPublic('hex')
],
console.log("Creating databases and waiting for peers to connect")
// Open the databases on the first node
options = Object.assign({}, options, { create: true })
// Open the databases on the first node
for (let dbInterface of databaseInterfaces) {
const db = await dbInterface.open(orbitdb1, dbInterface.name, options)
localDatabases.push(db)
}
// Open the databases on the second node, set 'sync' flag so that
// the second peer fetches the db manifest from the network
options = Object.assign({}, options, { sync: true })
for (let [index, dbInterface] of databaseInterfaces.entries()) {
const address = localDatabases[index].address.toString()
const db = await dbInterface.open(orbitdb2, address, options)
remoteDatabases.push(db)
}
// Wait for the peers to connect
await waitForPeers(ipfs1, [orbitdb2.id], localDatabases[0].address.toString())
await waitForPeers(ipfs1, [orbitdb2.id], localDatabases[0].address.toString())
console.log("Peers connected")
})
afterEach(async () => {
for (let db of remoteDatabases)
await db.drop()
for (let db of localDatabases)
await db.drop()
})
it('replicates multiple open databases', async () => {
const entryCount = 100
const entryArr = []
// Create an array that we use to create the db entries
for (let i = 1; i < entryCount + 1; i ++)
entryArr.push(i)
// Result state,
// we count how many times 'replicated' event was fired per db
let replicated = {}
localDatabases.forEach(db => {
replicated[db.address.toString()] = 0
}) })
})
// Write entries to each database // Listen for the updates from remote peers
console.log("Writing to databases") remoteDatabases.forEach(db => {
databaseInterfaces.forEach((dbInterface, index) => { db.events.on('replicated', (address) => {
const db = localDatabases[index] replicated[address] += 1
mapSeries(entryArr, val => dbInterface.write(db, val)) })
}) })
// Function to check if all databases have been replicated, // Write entries to each database
// we calculate this by checking number of 'replicated' events fired console.log("Writing to databases")
const allReplicated = () => { databaseInterfaces.forEach((dbInterface, index) => {
return remoteDatabases.every(db => db._oplog.length === entryCount) const db = localDatabases[index]
} mapSeries(entryArr, val => dbInterface.write(db, val))
})
console.log("Waiting for replication to finish") // Function to check if all databases have been replicated,
// we calculate this by checking number of 'replicated' events fired
const allReplicated = () => {
return remoteDatabases.every(db => db._oplog.length === entryCount)
}
return new Promise((resolve, reject) => { console.log("Waiting for replication to finish")
const interval = setInterval(() => {
if (allReplicated()) { return new Promise((resolve, reject) => {
clearInterval(interval) const interval = setInterval(() => {
// Verify that the databases contain all the right entries if (allReplicated()) {
databaseInterfaces.forEach((dbInterface, index) => { clearInterval(interval)
const db = remoteDatabases[index] // Verify that the databases contain all the right entries
const result = dbInterface.query(db) databaseInterfaces.forEach((dbInterface, index) => {
assert.equal(result, entryCount) const db = remoteDatabases[index]
assert.equal(db._oplog.length, entryCount) const result = dbInterface.query(db)
}) assert.equal(result, entryCount)
resolve() assert.equal(db._oplog.length, entryCount)
} })
}, 500) resolve()
}
}, 500)
})
}) })
}) })
}) })
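The manual ipfs.swarm.connect calls on each node's _peerInfo multiaddrs are replaced here by the connectPeers utility. Its implementation is not part of this diff; a plausible sketch, assuming it looks up each node's first advertised address via the IPFS API and dials in both directions:

// Hypothetical sketch of test/utils/connect-peers.js
'use strict'

const connectPeers = async (ipfs1, ipfs2) => {
  const id1 = await ipfs1.id()
  const id2 = await ipfs2.id()
  await ipfs1.swarm.connect(id2.addresses[0])
  await ipfs2.swarm.connect(id1.addresses[0])
}

module.exports = connectPeers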


@@ -4,257 +4,266 @@ const assert = require('assert')
const mapSeries = require('p-map-series')
const rmrf = require('rimraf')
const OrbitDB = require('../src/OrbitDB')

// Include test utilities
const {
  config,
  startIpfs,
  stopIpfs,
  testAPIs,
} = require('./utils')

const dbPath = './orbitdb/tests/persistency'
const ipfsPath = './orbitdb/tests/persistency/ipfs'

Object.keys(testAPIs).forEach(API => {
  describe(`orbit-db - Persistency (${API})`, function() {
    this.timeout(config.timeout)

    const entryCount = 100

    let ipfsd, ipfs, orbitdb1, db, address

    before(async () => {
      config.daemon1.repo = ipfsPath
      rmrf.sync(config.daemon1.repo)
      rmrf.sync(dbPath)
      ipfsd = await startIpfs(API, config.daemon1)
      ipfs = ipfsd.api
      orbitdb1 = new OrbitDB(ipfs, dbPath + '/1')
    })

    after(async () => {
      if(orbitdb1)
        await orbitdb1.stop()

      if (ipfsd)
        await stopIpfs(ipfsd)
    })
it('loads database from local cache', async () => { describe('load', function() {
db = await orbitdb1.eventlog(address) beforeEach(async () => {
await db.load() const dbName = new Date().getTime().toString()
const items = db.iterator({ limit: -1 }).collect() const entryArr = []
assert.equal(items.length, entryCount)
assert.equal(items[0].payload.value, 'hello0')
assert.equal(items[items.length - 1].payload.value, 'hello99')
})
it('loads database partially', async () => { for (let i = 0; i < entryCount; i ++)
const amount = 33 entryArr.push(i)
db = await orbitdb1.eventlog(address)
await db.load(amount)
const items = db.iterator({ limit: -1 }).collect()
assert.equal(items.length, amount)
assert.equal(items[0].payload.value, 'hello' + (entryCount - amount))
assert.equal(items[1].payload.value, 'hello' + (entryCount - amount + 1))
assert.equal(items[items.length - 1].payload.value, 'hello99')
})
it('load and close several times', async () => { db = await orbitdb1.eventlog(dbName)
const amount = 16 address = db.address.toString()
      await mapSeries(entryArr, (i) => db.add('hello' + i))
      await db.close()
      db = null
    })

    afterEach(async () => {
      await db.drop()
    })

    it('loads database from local cache', async () => {
      db = await orbitdb1.eventlog(address)
      await db.load()
      const items = db.iterator({ limit: -1 }).collect()
      assert.equal(items.length, entryCount)
      assert.equal(items[0].payload.value, 'hello0')
      assert.equal(items[items.length - 1].payload.value, 'hello99')
    })

    it('loads database partially', async () => {
      const amount = 33
      db = await orbitdb1.eventlog(address)
      await db.load(amount)
      const items = db.iterator({ limit: -1 }).collect()
      assert.equal(items.length, amount)
      assert.equal(items[0].payload.value, 'hello' + (entryCount - amount))
      assert.equal(items[1].payload.value, 'hello' + (entryCount - amount + 1))
      assert.equal(items[items.length - 1].payload.value, 'hello99')
    })

    it('load and close several times', async () => {
      const amount = 16
      for (let i = 0; i < amount; i ++) {
        db = await orbitdb1.eventlog(address)
        await db.load()
        const items = db.iterator({ limit: -1 }).collect()
        assert.equal(items.length, entryCount)
        assert.equal(items[0].payload.value, 'hello0')
        assert.equal(items[1].payload.value, 'hello1')
        assert.equal(items[items.length - 1].payload.value, 'hello99')
        await db.close()
      }
    })

    it('closes database while loading', async () => {
      db = await orbitdb1.eventlog(address)
      db.load() // don't wait for load to finish
      await db.close()
      assert.equal(db._cache.store, null)
    })

    it('load, add one, close - several times', async () => {
      const amount = 8
      for (let i = 0; i < amount; i ++) {
        db = await orbitdb1.eventlog(address)
        await db.load()
        await db.add('hello' + (entryCount + i))
        const items = db.iterator({ limit: -1 }).collect()
        assert.equal(items.length, entryCount + i + 1)
        assert.equal(items[items.length - 1].payload.value, 'hello' + (entryCount + i))
        await db.close()
      }
    })

    it('loading a database emits \'ready\' event', async () => {
      db = await orbitdb1.eventlog(address)
      return new Promise(async (resolve) => {
        db.events.on('ready', () => {
          const items = db.iterator({ limit: -1 }).collect()
          assert.equal(items.length, entryCount)
          assert.equal(items[0].payload.value, 'hello0')
          assert.equal(items[items.length - 1].payload.value, 'hello99')
          resolve()
        })
        await db.load()
      })
    })

    it('loading a database emits \'load.progress\' event', async () => {
      db = await orbitdb1.eventlog(address)
      return new Promise(async (resolve, reject) => {
        let count = 0
        db.events.on('load.progress', (address, hash, entry, progress, total) => {
          count ++
          try {
            assert.equal(address, db.address.toString())
            assert.equal(total, entryCount)
            assert.equal(progress, count)
            assert.notEqual(hash, null)
            assert.notEqual(entry, null)
            if (progress === entryCount && count === entryCount) {
              setTimeout(() => {
                resolve()
              }, 200)
            }
          } catch (e) {
            reject(e)
          }
        })
        // Start loading the database
        await db.load()
      })
    })
  })

  describe('load from empty snapshot', function() {
    it('loads database from an empty snapshot', async () => {
      db = await orbitdb1.eventlog('empty-snapshot')
      address = db.address.toString()
      await db.saveSnapshot()
      await db.close()

      db = await orbitdb1.open(address)
      await db.loadFromSnapshot()
      const items = db.iterator({ limit: -1 }).collect()
      assert.equal(items.length, 0)
    })
  })

  describe('load from snapshot', function() {
    beforeEach(async () => {
      const dbName = new Date().getTime().toString()
      const entryArr = []

      for (let i = 0; i < entryCount; i ++)
        entryArr.push(i)

      db = await orbitdb1.eventlog(dbName)
      address = db.address.toString()
      await mapSeries(entryArr, (i) => db.add('hello' + i))
      await db.saveSnapshot()
      await db.close()
      db = null
    })

    afterEach(async () => {
      await db.drop()
    })

    it('loads database from snapshot', async () => {
      db = await orbitdb1.eventlog(address)
      await db.loadFromSnapshot()
      const items = db.iterator({ limit: -1 }).collect()
      assert.equal(items.length, entryCount)
      assert.equal(items[0].payload.value, 'hello0')
      assert.equal(items[entryCount - 1].payload.value, 'hello99')
    })

    it('load, add one and save snapshot several times', async () => {
      const amount = 8
      for (let i = 0; i < amount; i ++) {
        db = await orbitdb1.eventlog(address)
        await db.loadFromSnapshot()
        await db.add('hello' + (entryCount + i))
        const items = db.iterator({ limit: -1 }).collect()
        assert.equal(items.length, entryCount + i + 1)
        assert.equal(items[0].payload.value, 'hello0')
        assert.equal(items[items.length - 1].payload.value, 'hello' + (entryCount + i))
        await db.saveSnapshot()
        await db.close()
      }
    })

    it('throws an error when trying to load a missing snapshot', async () => {
      db = await orbitdb1.eventlog(address)
      await db.drop()
      db = null
      db = await orbitdb1.eventlog(address)

      let err
      try {
        await db.loadFromSnapshot()
      } catch (e) {
        err = e.toString()
      }
      assert.equal(err, `Error: Snapshot for ${address} not found!`)
    })

    it('loading a database emits \'ready\' event', async () => {
      db = await orbitdb1.eventlog(address)
      return new Promise(async (resolve) => {
        db.events.on('ready', () => {
          const items = db.iterator({ limit: -1 }).collect()
          assert.equal(items.length, entryCount)
          assert.equal(items[0].payload.value, 'hello0')
          assert.equal(items[entryCount - 1].payload.value, 'hello99')
          resolve()
        })
        await db.loadFromSnapshot()
      })
    })

    it('loading a database emits \'load.progress\' event', async () => {
      db = await orbitdb1.eventlog(address)
      return new Promise(async (resolve, reject) => {
        let count = 0
        db.events.on('load.progress', (address, hash, entry, progress, total) => {
          count ++
          try {
            assert.equal(address, db.address.toString())
            assert.equal(total, entryCount)
            assert.equal(progress, count)
            assert.notEqual(hash, null)
            assert.notEqual(entry, null)
            if (progress === entryCount && count === entryCount) {
              resolve()
            }
          } catch (e) {
            reject(e)
          }
        })
        // Start loading the database
        await db.loadFromSnapshot()
      })
    })
  })
  })
})
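In plain terms, the snapshot tests above exercise a save, close, reopen and restore cycle. A minimal sketch of that flow, not part of this diff; `orbitdb` and `db` stand for an OrbitDB instance and an already-opened eventlog:

// Sketch only: the snapshot round-trip exercised by the tests above.
const snapshotRoundTrip = async (orbitdb, db) => {
  const address = db.address.toString()
  await db.saveSnapshot()            // persist the current log as a snapshot
  await db.close()
  db = await orbitdb.open(address)   // reopen the store from the same address
  await db.loadFromSnapshot()        // restore the log from the saved snapshot
  return db
}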
View File
@ -4,151 +4,162 @@ const assert = require('assert')
const mapSeries = require('p-each-series')
const rmrf = require('rimraf')
const OrbitDB = require('../src/OrbitDB')

// Include test utilities
const {
  config,
  startIpfs,
  stopIpfs,
  testAPIs,
  connectPeers,
  waitForPeers,
} = require('./utils')

const dbPath1 = './orbitdb/tests/replicate-and-load/1'
const dbPath2 = './orbitdb/tests/replicate-and-load/2'
const ipfsPath1 = './orbitdb/tests/replicate-and-load/1/ipfs'
const ipfsPath2 = './orbitdb/tests/replicate-and-load/2/ipfs'

Object.keys(testAPIs).forEach(API => {
  describe(`orbit-db - Replicate and Load (${API})`, function() {
    this.timeout(config.timeout)

    let ipfsd1, ipfsd2, ipfs1, ipfs2
    let orbitdb1, orbitdb2, db1, db2

    before(async () => {
      config.daemon1.repo = ipfsPath1
      config.daemon2.repo = ipfsPath2
      rmrf.sync(config.daemon1.repo)
      rmrf.sync(config.daemon2.repo)
      rmrf.sync(dbPath1)
      rmrf.sync(dbPath2)
      ipfsd1 = await startIpfs(API, config.daemon1)
      ipfsd2 = await startIpfs(API, config.daemon2)
      ipfs1 = ipfsd1.api
      ipfs2 = ipfsd2.api
      orbitdb1 = new OrbitDB(ipfs1, dbPath1)
      orbitdb2 = new OrbitDB(ipfs2, dbPath2)
      // Connect the peers manually to speed up test times
      await connectPeers(ipfs1, ipfs2)
    })

    after(async () => {
      if(orbitdb1)
        await orbitdb1.stop()

      if(orbitdb2)
        await orbitdb2.stop()

      if (ipfsd1)
        await stopIpfs(ipfsd1)

      if (ipfsd2)
        await stopIpfs(ipfsd2)
    })

    describe('two peers', function() {
      // Opens two databases db1 and db2 and gives write-access to both of the peers
      const openDatabases1 = async (options) => {
        // Set write access for both clients
        options.write = [
          orbitdb1.key.getPublic('hex'),
          orbitdb2.key.getPublic('hex')
        ],

        options = Object.assign({}, options, { path: dbPath1 })
        db1 = await orbitdb1.eventlog('replicate-and-load-tests', options)
        // Set 'localOnly' flag on and it'll error if the database doesn't exist locally
        options = Object.assign({}, options, { path: dbPath2 })
        db2 = await orbitdb2.eventlog(db1.address.toString(), options)
      }

      const openDatabases = async (options) => {
        // Set write access for both clients
        options.write = [
          orbitdb1.key.getPublic('hex'),
          orbitdb2.key.getPublic('hex')
        ],

        options = Object.assign({}, options, { path: dbPath1, create: true })
        db1 = await orbitdb1.eventlog('tests', options)
        // Set 'localOnly' flag on and it'll error if the database doesn't exist locally
        options = Object.assign({}, options, { path: dbPath2 })
        db2 = await orbitdb2.eventlog(db1.address.toString(), options)
      }

      beforeEach(async () => {
        await openDatabases({ sync: true })
        assert.equal(db1.address.toString(), db2.address.toString())

        console.log("Waiting for peers...")
        await waitForPeers(ipfs1, [orbitdb2.id], db1.address.toString())
        await waitForPeers(ipfs2, [orbitdb1.id], db1.address.toString())
        console.log("Found peers")
      })

      afterEach(async () => {
        await db1.drop()
        await db2.drop()
      })

      it('replicates database of 100 entries and loads it from the disk', async () => {
        const entryCount = 100
        const entryArr = []
        let timer

        for (let i = 0; i < entryCount; i ++)
          entryArr.push(i)

        await mapSeries(entryArr, (i) => db1.add('hello' + i))

        return new Promise((resolve, reject) => {
          timer = setInterval(async () => {
            const items = db2.iterator({ limit: -1 }).collect()
            if (items.length === entryCount) {
              clearInterval(timer)
              assert.equal(items.length, entryCount)
              assert.equal(items[0].payload.value, 'hello0')
              assert.equal(items[items.length - 1].payload.value, 'hello99')

              db2 = null

              try {
                // Set write access for both clients
                let options = {
                  write: [
                    orbitdb1.key.getPublic('hex'),
                    orbitdb2.key.getPublic('hex')
                  ],
                }

                // Get the previous address to make sure nothing mutates it
                const addr = db1.address.toString()

                // Open the database again (this time from the disk)
                options = Object.assign({}, options, { path: dbPath1, create: false })
                db1 = await orbitdb1.eventlog(addr, options)
                // Set 'localOnly' flag on and it'll error if the database doesn't exist locally
                options = Object.assign({}, options, { path: dbPath2, localOnly: true })
                db2 = await orbitdb2.eventlog(addr, options)

                await db1.load()
                await db2.load()

                // Make sure we have all the entries in the databases
                const result1 = db1.iterator({ limit: -1 }).collect()
                const result2 = db2.iterator({ limit: -1 }).collect()
                assert.equal(result1.length, entryCount)
                assert.equal(result2.length, entryCount)
              } catch (e) {
                reject(e)
              }
              resolve()
            }
          }, 100)
        })
      })
    })
  })
})
View File
@ -4,156 +4,166 @@ const assert = require('assert')
const mapSeries = require('p-each-series')
const rmrf = require('rimraf')
const OrbitDB = require('../src/OrbitDB')

// Include test utilities
const {
  config,
  startIpfs,
  stopIpfs,
  testAPIs,
  connectPeers,
  waitForPeers,
} = require('./utils')

const dbPath1 = './orbitdb/tests/replicate-automatically/1'
const dbPath2 = './orbitdb/tests/replicate-automatically/2'
const ipfsPath1 = './orbitdb/tests/replicate-automatically/1/ipfs'
const ipfsPath2 = './orbitdb/tests/replicate-automatically/2/ipfs'

Object.keys(testAPIs).forEach(API => {
  describe(`orbit-db - Automatic Replication (${API})`, function() {
    this.timeout(config.timeout)

    let ipfsd1, ipfsd2, ipfs1, ipfs2
    let orbitdb1, orbitdb2, db1, db2, db3, db4

    before(async () => {
      config.daemon1.repo = ipfsPath1
      config.daemon2.repo = ipfsPath2
      rmrf.sync(config.daemon1.repo)
      rmrf.sync(config.daemon2.repo)
      rmrf.sync(dbPath1)
      rmrf.sync(dbPath2)
      ipfsd1 = await startIpfs(API, config.daemon1)
      ipfsd2 = await startIpfs(API, config.daemon2)
      ipfs1 = ipfsd1.api
      ipfs2 = ipfsd2.api
      orbitdb1 = new OrbitDB(ipfs1, dbPath1)
      orbitdb2 = new OrbitDB(ipfs2, dbPath2)
      // Connect the peers manually to speed up test times
      await connectPeers(ipfs1, ipfs2)
    })

    after(async () => {
      if(orbitdb1)
        await orbitdb1.stop()

      if(orbitdb2)
        await orbitdb2.stop()

      if (ipfsd1)
        await stopIpfs(ipfsd1)

      if (ipfsd2)
        await stopIpfs(ipfsd2)
    })

    beforeEach(async () => {
      let options = {}
      // Set write access for both clients
      options.write = [
        orbitdb1.key.getPublic('hex'),
        orbitdb2.key.getPublic('hex')
      ],

      options = Object.assign({}, options, { path: dbPath1 })
      db1 = await orbitdb1.eventlog('replicate-automatically-tests', options)
      db3 = await orbitdb1.keyvalue('replicate-automatically-tests-kv', options)
    })

    afterEach(async () => {
      if (db1) await db1.drop()
      if (db2) await db2.drop()
      if (db3) await db3.drop()
      if (db4) await db4.drop()
    })

    it('starts replicating the database when peers connect', async () => {
      const entryCount = 10
      const entryArr = []
      let options = {}
      let timer

      // Create the entries in the first database
      for (let i = 0; i < entryCount; i ++)
        entryArr.push(i)

      await mapSeries(entryArr, (i) => db1.add('hello' + i))

      // Open the second database
      options = Object.assign({}, options, { path: dbPath2, sync: true })
      db2 = await orbitdb2.eventlog(db1.address.toString(), options)

      // Listen for the 'replicated' events and check that all the entries
      // were replicated to the second database
      return new Promise((resolve, reject) => {
        db2.events.on('replicated', (address) => {
          try {
            const result1 = db1.iterator({ limit: -1 }).collect()
            const result2 = db2.iterator({ limit: -1 }).collect()
            // Make sure we have all the entries
            if (result1.length === entryCount && result2.length === entryCount) {
              assert.deepEqual(result1, result2)
              resolve()
            }
          } catch (e) {
            reject(e)
          }
        })
      })
    })

    it('automatic replication exchanges the correct heads', async () => {
      const entryCount = 33
      const entryArr = []
      let options = {}
      let timer

      // Create the entries in the first database
      for (let i = 0; i < entryCount; i ++)
        entryArr.push(i)

      await mapSeries(entryArr, (i) => db1.add('hello' + i))

      // Open the second database
      options = Object.assign({}, options, { path: dbPath2, sync: true })
      db2 = await orbitdb2.eventlog(db1.address.toString(), options)
      db4 = await orbitdb2.keyvalue(db3.address.toString(), options)

      // Listen for the 'replicated' events and check that all the entries
      // were replicated to the second database
      return new Promise(async (resolve, reject) => {
        db4.events.on('replicated', (address, hash, entry) => {
          reject(new Error("Should not receive the 'replicated' event!"))
        })

        // Can't check this for now as db1 might've sent the heads to db2
        // before we subscribe to the event
        db2.events.on('replicate.progress', (address, hash, entry) => {
          try {
            // Check that the head we received from the first peer is the latest
            assert.equal(entry.payload.op, 'ADD')
            assert.equal(entry.payload.key, null)
            assert.notEqual(entry.payload.value.indexOf('hello'), -1)
            assert.notEqual(entry.clock, null)
          } catch (e) {
            reject(e)
          }
        })

        db2.events.on('replicated', (address) => {
          try {
            const result1 = db1.iterator({ limit: -1 }).collect()
            const result2 = db2.iterator({ limit: -1 }).collect()
            // Make sure we have all the entries
            if (result1.length === entryCount && result2.length === entryCount) {
              assert.deepEqual(result1, result2)
              resolve()
            }
          } catch (e) {
            reject(e)
          }
        })
      })
    })
  })
})
View File
@ -4,265 +4,71 @@ const assert = require('assert')
const mapSeries = require('p-each-series')
const rmrf = require('rimraf')
const OrbitDB = require('../src/OrbitDB')

// Include test utilities
const {
  config,
  startIpfs,
  stopIpfs,
  testAPIs,
  connectPeers,
  waitForPeers,
  MemStore,
} = require('./utils')

const dbPath1 = './orbitdb/tests/replication/1'
const dbPath2 = './orbitdb/tests/replication/2'
const ipfsPath1 = './orbitdb/tests/replication/1/ipfs'
const ipfsPath2 = './orbitdb/tests/replication/2/ipfs'

Object.keys(testAPIs).forEach(API => {
  describe(`orbit-db - Replication (${API})`, function() {
    this.timeout(config.timeout * 2)

    let ipfsd1, ipfsd2, ipfs1, ipfs2
    let orbitdb1, orbitdb2, db1, db2
    let id1, id2

    describe('two peers', function() {
      let timer
      let options

      before(async () => {
        config.daemon1.repo = ipfsPath1
        config.daemon2.repo = ipfsPath2
        rmrf.sync(config.daemon1.repo)
        rmrf.sync(config.daemon2.repo)
        rmrf.sync(dbPath1)
        rmrf.sync(dbPath2)
        ipfsd1 = await startIpfs(API, config.daemon1)
        ipfsd2 = await startIpfs(API, config.daemon2)
        ipfs1 = ipfsd1.api
        ipfs2 = ipfsd2.api
        // Use memory store for quicker tests
        const memstore = new MemStore()
        ipfs1.object.put = memstore.put.bind(memstore)
        ipfs1.object.get = memstore.get.bind(memstore)
        ipfs2.object.put = memstore.put.bind(memstore)
        ipfs2.object.get = memstore.get.bind(memstore)
        // Connect the peers manually to speed up test times
        await connectPeers(ipfs1, ipfs2)
      })

      after(async () => {
        if (ipfsd1)
          await stopIpfs(ipfsd1)

        if (ipfsd2)
          await stopIpfs(ipfsd2)
      })

      beforeEach(async () => {
        clearInterval(timer)

        orbitdb1 = new OrbitDB(ipfs1, dbPath1)
        orbitdb2 = new OrbitDB(ipfs2, dbPath2)

        options = {
          // Set write access for both clients
          write: [
            orbitdb1.key.getPublic('hex'),
@ -270,15 +76,88 @@
            ],
        }

        options = Object.assign({}, options, { path: dbPath1 })
        db1 = await orbitdb1.eventlog('replication-tests', options)
      })

      afterEach(async () => {
        clearInterval(timer)
        options = {}

        if (db1)
          await db1.drop()

        if (db2)
          await db2.drop()

        if(orbitdb1)
          await orbitdb1.stop()

        if(orbitdb2)
          await orbitdb2.stop()
      })
it('replicates database of 1 entry', async () => {
// Set 'sync' flag on. It'll prevent creating a new local database and rather
// fetch the database from the network
options = Object.assign({}, options, { path: dbPath2, sync: true })
db2 = await orbitdb2.eventlog(db1.address.toString(), options)
await waitForPeers(ipfs2, [orbitdb1.id], db1.address.toString())
await db1.add('hello')
return new Promise(resolve => {
setTimeout(() => {
const items = db2.iterator().collect()
assert.equal(items.length, 1)
assert.equal(items[0].payload.value, 'hello')
resolve()
}, 1000)
})
})
it('replicates database of 100 entries', async () => {
options = Object.assign({}, options, { path: dbPath2, sync: true })
db2 = await orbitdb2.eventlog(db1.address.toString(), options)
await waitForPeers(ipfs2, [orbitdb1.id], db1.address.toString())
const entryCount = 100
const entryArr = []
for (let i = 0; i < entryCount; i ++)
entryArr.push(i)
return new Promise(async (resolve, reject) => {
try {
await mapSeries(entryArr, (i) => db1.add('hello' + i))
} catch (e) {
reject(e)
}
timer = setInterval(() => {
const items = db2.iterator({ limit: -1 }).collect()
if (items.length === entryCount) {
clearInterval(timer)
assert.equal(items.length, entryCount)
assert.equal(items[0].payload.value, 'hello0')
assert.equal(items[items.length - 1].payload.value, 'hello99')
resolve()
}
}, 1000)
})
})
it('emits correct replication info', async () => {
options = Object.assign({}, options, { path: dbPath2, sync: true })
db2 = await orbitdb2.eventlog(db1.address.toString(), options)
await waitForPeers(ipfs2, [orbitdb1.id], db1.address.toString())
let finished = false
let eventCount = { 'replicate': 0, 'replicate.progress': 0, 'replicated': 0 }
let events = []
let expectedEventCount = 99
        db2.events.on('replicate', (address, entry) => {
          eventCount['replicate'] ++
          events.push({
            event: 'replicate',
            count: eventCount['replicate'],
@ -286,11 +165,8 @@
          })
        })

        db2.events.on('replicate.progress', (address, hash, entry, progress) => {
          eventCount['replicate.progress'] ++
          events.push({
            event: 'replicate.progress',
            count: eventCount['replicate.progress'],
@ -303,20 +179,8 @@
          })
        })

        db2.events.on('replicated', (address) => {
          eventCount['replicated'] ++
          events.push({
            event: 'replicated',
            count: eventCount['replicate'],
@ -329,189 +193,336 @@
          // Resolve with a little timeout to make sure we
          // don't receive more than one event
          setTimeout(() => {
            finished = db2.iterator({ limit: -1 }).collect().length === expectedEventCount
          }, 500)
        })

        return new Promise((resolve, reject) => {
try {
timer = setInterval(() => {
if (finished) {
clearInterval(timer)
assert.equal(eventCount['replicate'], expectedEventCount)
assert.equal(eventCount['replicate.progress'], expectedEventCount)
const replicateEvents = events.filter(e => e.event === 'replicate')
assert.equal(replicateEvents.length, expectedEventCount)
assert.equal(replicateEvents[0].entry.payload.value.split(' ')[0], 'hello')
assert.equal(replicateEvents[0].entry.clock.time, 1)
const replicateProgressEvents = events.filter(e => e.event === 'replicate.progress')
assert.equal(replicateProgressEvents.length, expectedEventCount)
assert.equal(replicateProgressEvents[0].entry.payload.value.split(' ')[0], 'hello')
assert.equal(replicateProgressEvents[0].entry.clock.time, 1)
assert.equal(replicateProgressEvents[0].replicationInfo.max, 1)
assert.equal(replicateProgressEvents[0].replicationInfo.progress, 1)
const replicatedEvents = events.filter(e => e.event === 'replicated')
assert.equal(replicatedEvents[0].replicationInfo.max, 1)
assert.equal(replicatedEvents[0].replicationInfo.progress, 1)
resolve()
}
}, 100)
} catch (e) {
reject(e)
}
// Trigger replication
let adds = []
for (let i = 0; i < expectedEventCount; i ++) {
adds.push(i)
}
mapSeries(adds, i => db1.add('hello ' + i))
})
})
it('emits correct replication info on fresh replication', async () => {
return new Promise(async (resolve, reject) => {
let finished = false
let eventCount = { 'replicate': 0, 'replicate.progress': 0, 'replicated': 0 }
let events = []
let expectedEventCount = 512
// Trigger replication
let adds = []
for (let i = 0; i < expectedEventCount; i ++) {
adds.push(i)
}
const add = async (i) => {
process.stdout.write("\rWriting " + (i + 1) + " / " + expectedEventCount)
await db1.add('hello ' + i)
}
await mapSeries(adds, add)
console.log()
// Open second instance again
options = {
path: dbPath2,
overwrite: true,
sync: true,
// Set write access for both clients
write: [
orbitdb1.key.getPublic('hex'),
orbitdb2.key.getPublic('hex')
],
}
db2 = await orbitdb2.eventlog(db1.address.toString(), options)
let current = 0
let total = 0
db2.events.on('replicate', (address, entry) => {
eventCount['replicate'] ++
total = db2._replicationInfo.max
// console.log("[replicate] ", '#' + eventCount['replicate'] + ':', current, '/', total, '| Tasks (in/queued/running/out):', db2._loader.tasksRequested, '/', db2._loader.tasksQueued, '/', db2._loader.tasksRunning, '/', db2._loader.tasksFinished)
events.push({
event: 'replicate',
count: eventCount['replicate'],
entry: entry,
})
})
db2.events.on('replicate.progress', (address, hash, entry) => {
eventCount['replicate.progress'] ++
current = db2._replicationInfo.progress
// console.log("[progress] ", '#' + eventCount['replicate.progress'] + ':', current, '/', total, '| Tasks (in/queued/running/out):', db2._loader.tasksRequested, '/', db2._loader.tasksQueued, '/', db2._loader.tasksRunning, '/', db2._loader.tasksFinished)
// assert.equal(db2._replicationInfo.progress, eventCount['replicate.progress'])
events.push({
event: 'replicate.progress',
count: eventCount['replicate.progress'],
entry: entry ,
replicationInfo: {
max: db2._replicationInfo.max,
progress: db2._replicationInfo.progress,
have: db2._replicationInfo.have,
},
})
})
db2.events.on('replicated', (address, length) => {
eventCount['replicated'] += length
current = db2._replicationInfo.progress
// console.log("[replicated]", '#' + eventCount['replicated'] + ':', current, '/', total, '| Tasks (in/queued/running/out):', db2._loader.tasksRequested, '/', db2._loader.tasksQueued, '/', db2._loader.tasksRunning, '/', db2._loader.tasksFinished, "|", db2._loader._stats.a, db2._loader._stats.b, db2._loader._stats.c, db2._loader._stats.d)
assert.equal(current, eventCount['replicated'])
assert.equal(total, expectedEventCount)
// Test the replicator state
assert.equal(db2._loader.tasksRequested >= current, true)
assert.equal(db2._loader.tasksQueued <= db2.options.referenceCount, true)
assert.equal(db2.options.referenceCount, 64)
assert.equal(db2._loader.tasksRunning, 0)
assert.equal(db2._loader.tasksFinished, current)
events.push({
event: 'replicated',
count: eventCount['replicate'],
replicationInfo: {
max: db2._replicationInfo.max,
progress: db2._replicationInfo.progress,
have: db2._replicationInfo.have,
},
})
// Resolve with a little timeout to make sure we
// don't receive more than one event
setTimeout( async () => {
// console.log(eventCount['replicate.progress'], expectedEventCount)
if (eventCount['replicate.progress'] === expectedEventCount) {
finished = true
}
}, 500)
})
          const st = new Date().getTime()
          timer = setInterval(async () => {
            if (finished) {
              clearInterval(timer)

              const et = new Date().getTime()
              console.log("Duration:", et - st, "ms")

              try {
                assert.equal(eventCount['replicate'], expectedEventCount)
                assert.equal(eventCount['replicate.progress'], expectedEventCount)

                const replicateEvents = events.filter(e => e.event === 'replicate')
                assert.equal(replicateEvents.length, expectedEventCount)
                assert.equal(replicateEvents[0].entry.payload.value.split(' ')[0], 'hello')
                assert.equal(replicateEvents[0].entry.clock.time, expectedEventCount)

                const replicateProgressEvents = events.filter(e => e.event === 'replicate.progress')
                assert.equal(replicateProgressEvents.length, expectedEventCount)
                assert.equal(replicateProgressEvents[0].entry.payload.value.split(' ')[0], 'hello')
                assert.equal(replicateProgressEvents[0].entry.clock.time, expectedEventCount)
                assert.equal(replicateProgressEvents[0].replicationInfo.max, expectedEventCount)
                assert.equal(replicateProgressEvents[0].replicationInfo.progress, 1)

                const replicatedEvents = events.filter(e => e.event === 'replicated')
                assert.equal(replicatedEvents[0].replicationInfo.max, expectedEventCount)
                assert.equal(replicatedEvents[replicatedEvents.length - 1].replicationInfo.progress, expectedEventCount)

                resolve()
              } catch (e) {
                reject(e)
              }
            }
          }, 100)
        })
})
      it('emits correct replication info in two-way replication', async () => {
        return new Promise(async (resolve, reject) => {
          let finished = false
          let eventCount = { 'replicate': 0, 'replicate.progress': 0, 'replicated': 0 }
          let events = []
          let expectedEventCount = 100

          // Trigger replication
          let adds = []
          for (let i = 0; i < expectedEventCount; i ++) {
            adds.push(i)
          }

          const add = async (i) => {
            // process.stdout.write("\rWriting " + (i + 1) + " / " + expectedEventCount)
            await Promise.all([db1.add('hello-1-' + i), db2.add('hello-2-' + i)])
          }

          // Open second instance again
          let options = {
            path: dbPath2,
            overwrite: true,
            sync: true,
            // Set write access for both clients
            write: [
              orbitdb1.key.getPublic('hex'),
              orbitdb2.key.getPublic('hex')
            ],
          }

          // if (db2) {
          //   await db2.drop()
          // }

          db2 = await orbitdb2.eventlog(db1.address.toString(), options)
          await waitForPeers(ipfs2, [orbitdb1.id], db1.address.toString())

          let current = 0
          let total = 0

          db2.events.on('replicate', (address, entry) => {
            eventCount['replicate'] ++
            current = db2._replicationInfo.progress
            total = db2._replicationInfo.max
            // console.log("[replicate] ", '#' + eventCount['replicate'] + ':', current, '/', total, '| Tasks (in/queued/running/out):', db2._loader.tasksRequested, '/', db2._loader.tasksQueued, '/', db2._loader.tasksRunning, '/', db2._loader.tasksFinished)
            events.push({
              event: 'replicate',
              count: eventCount['replicate'],
              entry: entry,
            })
          })

          let prevProgress = 0
          db2.events.on('replicate.progress', (address, hash, entry) => {
            eventCount['replicate.progress'] ++
current = db2._replicationInfo.progress
total = db2._replicationInfo.max
// console.log("[progress] ", '#' + eventCount['replicate.progress'] + ':', current, '/', total, '| Tasks (in/queued/running/out):', db2._loader.tasksRequested, '/', db2._loader.tasksQueued, '/', db2._loader.tasksRunning, '/', db2._loader.tasksFinished)
// assert.equal(current, total)
events.push({
event: 'replicate.progress',
count: eventCount['replicate.progress'],
entry: entry ,
replicationInfo: {
max: db2._replicationInfo.max,
progress: db2._replicationInfo.progress,
have: db2._replicationInfo.have,
},
})
})
db2.events.on('replicated', (address, length) => {
eventCount['replicated'] += length
current = db2._replicationInfo.progress
total = db2._replicationInfo.max
const values = db2.iterator({limit: -1}).collect()
// console.log(current, "/", total, "/", values.length)
// console.log("[replicated]", '#' + eventCount['replicated'] + ':', current, '/', total, '| Tasks (in/queued/running/out):', db2._loader.tasksRequested, '/', db2._loader.tasksQueued, '/', db2._loader.tasksRunning, '/', db2._loader.tasksFinished, "|", db2._loader._stats.a, db2._loader._stats.b, db2._loader._stats.c, db2._loader._stats.d)
assert.equal(current <= total, true)
events.push({
event: 'replicated',
count: eventCount['replicate'],
replicationInfo: {
max: db2._replicationInfo.max,
progress: db2._replicationInfo.progress,
have: db2._replicationInfo.have,
},
})
if (db2._replicationInfo.max >= expectedEventCount * 2
&& db2._replicationInfo.progress >= expectedEventCount * 2)
finished = true
})
const st = new Date().getTime()
try {
await mapSeries(adds, add)
timer = setInterval(() => {
if (finished) {
clearInterval(timer)
const et = new Date().getTime()
console.log("Duration:", et - st, "ms")
// console.log(eventCount['replicate'])
assert.equal(eventCount['replicate'], expectedEventCount)
assert.equal(eventCount['replicate.progress'], expectedEventCount)
assert.equal(eventCount['replicated'], expectedEventCount)
const replicateEvents = events.filter(e => e.event === 'replicate')
assert.equal(replicateEvents.length, expectedEventCount)
const replicateProgressEvents = events.filter(e => e.event === 'replicate.progress')
assert.equal(replicateProgressEvents.length, expectedEventCount)
assert.equal(replicateProgressEvents[replicateProgressEvents.length - 1].entry.clock.time, expectedEventCount)
assert.equal(replicateProgressEvents[replicateProgressEvents.length - 1].replicationInfo.max, expectedEventCount * 2)
assert.equal(replicateProgressEvents[replicateProgressEvents.length - 1].replicationInfo.progress, expectedEventCount * 2)
const replicatedEvents = events.filter(e => e.event === 'replicated')
assert.equal(replicatedEvents[replicatedEvents.length - 1].replicationInfo.progress, expectedEventCount * 2)
assert.equal(replicatedEvents[replicatedEvents.length - 1].replicationInfo.max, expectedEventCount * 2)
const values1 = db1.iterator({limit: -1}).collect()
const values2 = db2.iterator({limit: -1}).collect()
assert.deepEqual(values1, values2)
// Test the replicator state
assert.equal(db1._loader.tasksRequested, expectedEventCount)
assert.equal(db1._loader.tasksQueued, 0)
assert.equal(db1._loader.tasksRunning, 0)
assert.equal(db1._loader.tasksFinished, expectedEventCount)
assert.equal(db2._loader.tasksRequested, expectedEventCount)
assert.equal(db2._loader.tasksQueued, 0)
assert.equal(db2._loader.tasksRunning, 0)
assert.equal(db2._loader.tasksFinished, expectedEventCount)
resolve()
}
}, 100)
} catch (e) {
reject(e)
}
}) })
const st = new Date().getTime()
try {
await mapSeries(adds, add)
timer = setInterval(() => {
if (finished) {
clearInterval(timer)
const et = new Date().getTime()
console.log("Duration:", et - st, "ms")
assert.equal(eventCount['replicate'], expectedEventCount)
assert.equal(eventCount['replicate.progress'], expectedEventCount)
assert.equal(eventCount['replicated'], expectedEventCount)
const replicateEvents = events.filter(e => e.event === 'replicate')
assert.equal(replicateEvents.length, expectedEventCount)
const replicateProgressEvents = events.filter(e => e.event === 'replicate.progress')
assert.equal(replicateProgressEvents.length, expectedEventCount)
assert.equal(replicateProgressEvents[replicateProgressEvents.length - 1].entry.clock.time, expectedEventCount)
assert.equal(replicateProgressEvents[replicateProgressEvents.length - 1].replicationInfo.max, expectedEventCount * 2)
assert.equal(replicateProgressEvents[replicateProgressEvents.length - 1].replicationInfo.progress, expectedEventCount * 2)
const replicatedEvents = events.filter(e => e.event === 'replicated')
assert.equal(replicatedEvents[replicatedEvents.length - 1].replicationInfo.progress, expectedEventCount * 2)
assert.equal(replicatedEvents[replicatedEvents.length - 1].replicationInfo.max, expectedEventCount * 2)
const values1 = db1.iterator({limit: -1}).collect()
const values2 = db2.iterator({limit: -1}).collect()
assert.deepEqual(values1, values2)
// Test the replicator state
assert.equal(db1._loader.tasksRequested, expectedEventCount)
assert.equal(db1._loader.tasksQueued, 0)
assert.equal(db1._loader.tasksRunning, 0)
assert.equal(db1._loader.tasksFinished, expectedEventCount)
assert.equal(db2._loader.tasksRequested, expectedEventCount)
assert.equal(db2._loader.tasksQueued, 0)
assert.equal(db2._loader.tasksRunning, 0)
assert.equal(db2._loader.tasksFinished, expectedEventCount)
resolve()
}
}, 100)
} catch (e) {
reject(e)
}
}) })
}) })
}) })
}) })
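The replication tests above revolve around three store events. A minimal sketch of listening to them, not part of this diff; `db` is assumed to be an already-opened eventlog:

// Sketch only: the events the replication tests assert on.
db.events.on('replicate', (address, entry) => {
  // An entry has been queued for replication
  console.log('replicate:', entry.payload.value)
})
db.events.on('replicate.progress', (address, hash, entry, progress) => {
  console.log('replicate.progress:', progress)
})
db.events.on('replicated', (address) => {
  console.log('replicated:', address)
})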
View File
@ -0,0 +1,10 @@
'use strict'
const connectIpfsNodes = async (ipfs1, ipfs2) => {
const id1 = await ipfs1.id()
const id2 = await ipfs2.id()
await ipfs1.swarm.connect(id2.addresses[0])
await ipfs2.swarm.connect(id1.addresses[0])
}
module.exports = connectIpfsNodes
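For orientation, a minimal sketch of how this helper is combined with the other utilities inside a Mocha before() hook in a test file; helper names are as defined in these utils, and the 'js-ipfs' daemon type is illustrative:

// Sketch only: wiring two test daemons together, as the updated tests do.
const { config, startIpfs, connectPeers } = require('./utils')

let ipfsd1, ipfsd2
before(async () => {
  ipfsd1 = await startIpfs('js-ipfs', config.daemon1)
  ipfsd2 = await startIpfs('js-ipfs', config.daemon2)
  // Connect the two daemons directly so they find each other immediately
  await connectPeers(ipfsd1.api, ipfsd2.api)
})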
8
test/utils/index.js Normal file
View File
@ -0,0 +1,8 @@
exports.config = require('./config.js')
exports.testAPIs = require('./test-apis')
exports.startIpfs = require('./start-ipfs')
exports.stopIpfs = require('./stop-ipfs')
exports.waitForPeers = require('./wait-for-peers')
exports.connectPeers = require('./connect-peers')
exports.MemStore = require('./mem-store')
exports.CustomTestKeystore = require('./custom-test-keystore')

test/utils/start-ipfs.js
View File

@ -1,17 +1,42 @@
'use strict' 'use strict'
const IPFS = require('ipfs') const IPFSFactory = require('ipfsd-ctl')
const testAPIs = require('./test-apis')
/** /**
* Start an IPFS instance * Start an IPFS instance
* @param {Object} config [IPFS configuration to use] * @param {Object} config [IPFS configuration to use]
* @return {[Promise<IPFS>]} [IPFS instance] * @return {[Promise<IPFS>]} [IPFS instance]
*/ */
const startIpfs = (config = {}) => { const startIpfs = (type, config = {}) => {
return new Promise((resolve, reject) => { return new Promise((resolve, reject) => {
const ipfs = new IPFS(config) if (!testAPIs[type]) {
ipfs.on('error', reject) reject(new Error(`Wanted API type ${JSON.stringify(type)} is unknown. Available types: ${Object.keys(testAPIs).join(', ')}`))
ipfs.on('ready', () => resolve(ipfs)) }
// If we're starting a process, pass command line arguments to it
if (!config.args) {
config.args = ['--enable-pubsub-experiment']
}
// Spawn an IPFS daemon (type defined in test-apis.js)
IPFSFactory
.create(testAPIs[type])
.spawn(config, async (err, ipfsd) => {
if (err) {
return reject(err)
}
// Monkey patch _peerInfo to the ipfs api/instance
// to make js-ipfs-api compatible with js-ipfs
// TODO: Get IPFS id via coherent API call (without it being asynchronous)
if (!ipfsd.api._peerInfo) {
let { id } = await ipfsd.api.id()
ipfsd.api._peerInfo = { id: { _idB58String: id } }
}
resolve(ipfsd)
})
}) })
} }

17
test/utils/stop-ipfs.js Normal file
View File

@ -0,0 +1,17 @@
'use strict'
/**
* Stop an IPFS or ipfsd-ctl instance
* @param {Object} ipfs [IPFS or ipfsd-ctl instance to stop]
* @return {Promise} [Resolves once the instance has stopped]
*/
const stopIpfs = (ipfs) => {
return new Promise((resolve, reject) => {
ipfs.stop((err) => {
if (err) { return reject(err) }
resolve()
})
})
}
module.exports = stopIpfs
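
The helper exists only to give the callback-style stop() a promise interface so suites can simply await stopIpfs(ipfsd) in an after hook. For comparison, an equivalent one-liner built on Node's util.promisify; this is an alternative sketch, not what the change uses:

'use strict'

const { promisify } = require('util')

// Alternative: promisify the callback-style stop() of an IPFS/ipfsd-ctl instance.
// Binding is needed so stop() keeps its instance as `this`.
const stopIpfs = (ipfs) => promisify(ipfs.stop.bind(ipfs))()

module.exports = stopIpfs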

26
test/utils/test-apis.js Normal file
View File

@ -0,0 +1,26 @@
const IPFS = require('ipfs')
// Available daemon types are defined in:
// https://github.com/ipfs/js-ipfsd-ctl#ipfsfactory---const-f--ipfsfactorycreateoptions
const jsIpfs = {
'js-ipfs': {
type: 'proc',
exec: IPFS,
}
}
const goIpfs = {
'go-ipfs': {
type: 'go',
}
}
// IPFS daemons to run the tests with
let testAPIs = Object.assign({}, jsIpfs)
// By default, we only run tests against js-ipfs.
// Setting env variable 'TEST=all' will make tests run with go-ipfs also.
if (process.env.TEST === 'all')
testAPIs = Object.assign({}, testAPIs, goIpfs)
module.exports = testAPIs
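
A small sketch of what the module exports under each setting; setting process.env.TEST before requiring it mimics launching the test run with TEST=all (the path assumes a sibling script inside test/utils):

// With TEST unset, only the in-process js-ipfs daemon is configured.
// With TEST=all, the go-ipfs daemon is added as well.
process.env.TEST = 'all'
const testAPIs = require('./test-apis')
console.log(Object.keys(testAPIs)) // => [ 'js-ipfs', 'go-ipfs' ]  (default: [ 'js-ipfs' ])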

test/write-permissions.test.js
View File

@ -3,11 +3,17 @@
const assert = require('assert') const assert = require('assert')
const rmrf = require('rimraf') const rmrf = require('rimraf')
const OrbitDB = require('../src/OrbitDB') const OrbitDB = require('../src/OrbitDB')
const config = require('./utils/config')
const startIpfs = require('./utils/start-ipfs')
const dbPath = './orbitdb/tests/sync' // Include test utilities
const ipfsPath = './orbitdb/tests/feed/ipfs' const {
config,
startIpfs,
stopIpfs,
testAPIs,
} = require('./utils')
const dbPath = './orbitdb/tests/write-permissions'
const ipfsPath = './orbitdb/tests/write-permissions/ipfs'
const databases = [ const databases = [
{ {
@ -52,189 +58,192 @@ const databases = [
}, },
] ]
describe('orbit-db - Write Permissions', function() { Object.keys(testAPIs).forEach(API => {
this.timeout(20000) describe(`orbit-db - Write Permissions (${API})`, function() {
this.timeout(20000)
let ipfs, orbitdb1, orbitdb2 let ipfsd, ipfs, orbitdb1, orbitdb2
before(async () => { before(async () => {
config.daemon1.repo = ipfsPath config.daemon1.repo = ipfsPath
rmrf.sync(config.daemon1.repo) rmrf.sync(config.daemon1.repo)
rmrf.sync(dbPath) rmrf.sync(dbPath)
ipfs = await startIpfs(config.daemon1) ipfsd = await startIpfs(API, config.daemon1)
orbitdb1 = new OrbitDB(ipfs, dbPath + '/1') ipfs = ipfsd.api
orbitdb2 = new OrbitDB(ipfs, dbPath + '/2') orbitdb1 = new OrbitDB(ipfs, dbPath + '/1')
}) orbitdb2 = new OrbitDB(ipfs, dbPath + '/2')
after(async () => {
if(orbitdb1)
await orbitdb1.stop()
if(orbitdb2)
await orbitdb2.stop()
if (ipfs)
await ipfs.stop()
})
describe('allows multiple peers to write to the databases', function() {
databases.forEach(async (database) => {
it(database.type + ' allows multiple writers', async () => {
let options = {
// Set write access for both clients
write: [
orbitdb1.key.getPublic('hex'),
orbitdb2.key.getPublic('hex')
],
}
const db1 = await database.create(orbitdb1, 'sync-test', options)
options = Object.assign({}, options, { sync: true })
const db2 = await database.create(orbitdb2, db1.address.toString(), options)
await database.tryInsert(db1)
await database.tryInsert(db2)
assert.deepEqual(database.getTestValue(db1), database.expectedValue)
assert.deepEqual(database.getTestValue(db2), database.expectedValue)
await db1.close()
await db2.close()
})
}) })
})
describe('syncs databases', function() { after(async () => {
databases.forEach(async (database) => { if(orbitdb1)
it(database.type + ' syncs', async () => { await orbitdb1.stop()
let options = {
// Set write access for both clients
write: [
orbitdb1.key.getPublic('hex'),
orbitdb2.key.getPublic('hex')
],
}
const db1 = await database.create(orbitdb1, 'sync-test', options) if(orbitdb2)
options = Object.assign({}, options, { sync: true }) await orbitdb2.stop()
const db2 = await database.create(orbitdb2, db1.address.toString(), options)
await database.tryInsert(db2) if (ipfsd)
await stopIpfs(ipfsd)
assert.equal(database.query(db1).length, 0)
db1.sync(db2._oplog.heads)
return new Promise(resolve => {
setTimeout(async () => {
const value = database.getTestValue(db1)
assert.deepEqual(value, database.expectedValue)
await db1.close()
await db2.close()
resolve()
}, 300)
})
})
}) })
})
describe('syncs databases that anyone can write to', function() { describe('allows multiple peers to write to the databases', function() {
databases.forEach(async (database) => { databases.forEach(async (database) => {
it(database.type + ' syncs', async () => { it(database.type + ' allows multiple writers', async () => {
let options = { let options = {
// Set write permission for everyone // Set write access for both clients
write: ['*'], write: [
} orbitdb1.key.getPublic('hex'),
orbitdb2.key.getPublic('hex')
],
}
const db1 = await database.create(orbitdb1, 'sync-test-public-dbs', options) const db1 = await database.create(orbitdb1, 'sync-test', options)
options = Object.assign({}, options, { sync: true })
const db2 = await database.create(orbitdb2, db1.address.toString(), options)
await database.tryInsert(db2)
assert.equal(database.query(db1).length, 0)
db1.sync(db2._oplog.heads)
return new Promise(resolve => {
setTimeout(async () => {
const value = database.getTestValue(db1)
assert.deepEqual(value, database.expectedValue)
await db1.close()
await db2.close()
resolve()
}, 300)
})
})
})
})
describe('doesn\'t sync if peer is not allowed to write to the database', function() {
databases.forEach(async (database) => {
it(database.type + ' doesn\'t sync', async () => {
let options = {
// Only peer 1 can write
write: [orbitdb1.key.getPublic('hex')],
}
let err
options = Object.assign({}, options, { path: dbPath + '/sync-test/1' })
const db1 = await database.create(orbitdb1, 'write error test 1', options)
options = Object.assign({}, options, { path: dbPath + '/sync-test/2', sync: true })
const db2 = await database.create(orbitdb2, 'write error test 1', options)
try {
// Catch replication event if the update from peer 2 got synced and into the database
db1.events.on('replicated', () => err = new Error('Shouldn\'t replicate!'))
// Try to update from peer 2, this shouldn't be allowed
await database.tryInsert(db2)
} catch (e) {
// Make sure peer 2's instance throws an error
assert.equal(e.toString(), 'Error: Not allowed to write')
}
// Make sure nothing was added to the database
assert.equal(database.query(db1).length, 0)
// Try to sync peer 1 with peer 2, this shouldn't produce anything
// at peer 1 (nothing was supposed to be added to the database by peer 2)
db1.sync(db2._oplog.heads)
return new Promise((resolve, reject) => {
setTimeout(async () => {
// Make sure nothing was added
assert.equal(database.query(db1).length, 0)
await db1.close()
await db2.close()
if (err) {
reject(err)
} else {
resolve()
}
}, 300)
})
})
})
})
describe('throws an error if peer is not allowed to write to the database', function() {
databases.forEach(async (database) => {
it(database.type + ' throws an error', async () => {
let options = {
// No write access (only creator of the database can write)
write: [],
}
let err
try {
const db1 = await database.create(orbitdb1, 'write error test 2', options)
options = Object.assign({}, options, { sync: true }) options = Object.assign({}, options, { sync: true })
const db2 = await database.create(orbitdb2, db1.address.toString(), options) const db2 = await database.create(orbitdb2, db1.address.toString(), options)
await database.tryInsert(db1)
await database.tryInsert(db2) await database.tryInsert(db2)
} catch (e) {
err = e.toString() assert.deepEqual(database.getTestValue(db1), database.expectedValue)
} assert.deepEqual(database.getTestValue(db2), database.expectedValue)
assert.equal(err, 'Error: Not allowed to write')
await db1.close()
await db2.close()
})
})
})
describe('syncs databases', function() {
databases.forEach(async (database) => {
it(database.type + ' syncs', async () => {
let options = {
// Set write access for both clients
write: [
orbitdb1.key.getPublic('hex'),
orbitdb2.key.getPublic('hex')
],
}
const db1 = await database.create(orbitdb1, 'sync-test', options)
options = Object.assign({}, options, { sync: true })
const db2 = await database.create(orbitdb2, db1.address.toString(), options)
await database.tryInsert(db2)
assert.equal(database.query(db1).length, 0)
db1.sync(db2._oplog.heads)
return new Promise(resolve => {
setTimeout(async () => {
const value = database.getTestValue(db1)
assert.deepEqual(value, database.expectedValue)
await db1.close()
await db2.close()
resolve()
}, 300)
})
})
})
})
describe('syncs databases that anyone can write to', function() {
databases.forEach(async (database) => {
it(database.type + ' syncs', async () => {
let options = {
// Set write permission for everyone
write: ['*'],
}
const db1 = await database.create(orbitdb1, 'sync-test-public-dbs', options)
options = Object.assign({}, options, { sync: true })
const db2 = await database.create(orbitdb2, db1.address.toString(), options)
await database.tryInsert(db2)
assert.equal(database.query(db1).length, 0)
db1.sync(db2._oplog.heads)
return new Promise(resolve => {
setTimeout(async () => {
const value = database.getTestValue(db1)
assert.deepEqual(value, database.expectedValue)
await db1.close()
await db2.close()
resolve()
}, 300)
})
})
})
})
describe('doesn\'t sync if peer is not allowed to write to the database', function() {
databases.forEach(async (database) => {
it(database.type + ' doesn\'t sync', async () => {
let options = {
// Only peer 1 can write
write: [orbitdb1.key.getPublic('hex')],
}
let err
options = Object.assign({}, options, { path: dbPath + '/sync-test/1' })
const db1 = await database.create(orbitdb1, 'write error test 1', options)
options = Object.assign({}, options, { path: dbPath + '/sync-test/2', sync: true })
const db2 = await database.create(orbitdb2, 'write error test 1', options)
try {
// Catch replication event if the update from peer 2 got synced and into the database
db1.events.on('replicated', () => err = new Error('Shouldn\'t replicate!'))
// Try to update from peer 2, this shouldn't be allowed
await database.tryInsert(db2)
} catch (e) {
// Make sure peer 2's instance throws an error
assert.equal(e.toString(), 'Error: Not allowed to write')
}
// Make sure nothing was added to the database
assert.equal(database.query(db1).length, 0)
// Try to sync peer 1 with peer 2, this shouldn't produce anything
// at peer 1 (nothing was supposed to be added to the database by peer 2)
db1.sync(db2._oplog.heads)
return new Promise((resolve, reject) => {
setTimeout(async () => {
// Make sure nothing was added
assert.equal(database.query(db1).length, 0)
await db1.close()
await db2.close()
if (err) {
reject(err)
} else {
resolve()
}
}, 300)
})
})
})
})
describe('throws an error if peer is not allowed to write to the database', function() {
databases.forEach(async (database) => {
it(database.type + ' throws an error', async () => {
let options = {
// No write access (only creator of the database can write)
write: [],
}
let err
try {
const db1 = await database.create(orbitdb1, 'write error test 2', options)
options = Object.assign({}, options, { sync: true })
const db2 = await database.create(orbitdb2, db1.address.toString(), options)
await database.tryInsert(db2)
} catch (e) {
err = e.toString()
}
assert.equal(err, 'Error: Not allowed to write')
})
}) })
}) })
}) })
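
Putting the pieces together, the write-permissions suite now boots one daemon per configured API type and tears it down with stopIpfs. A condensed sketch of the new wrapper, with the individual test bodies elided (they are carried over from the previous version essentially unchanged apart from indentation):

'use strict'

const assert = require('assert')
const rmrf = require('rimraf')
const OrbitDB = require('../src/OrbitDB')

// Include test utilities
const {
  config,
  startIpfs,
  stopIpfs,
  testAPIs,
} = require('./utils')

const dbPath = './orbitdb/tests/write-permissions'
const ipfsPath = './orbitdb/tests/write-permissions/ipfs'

Object.keys(testAPIs).forEach(API => {
  describe(`orbit-db - Write Permissions (${API})`, function() {
    this.timeout(20000)

    let ipfsd, ipfs, orbitdb1, orbitdb2

    before(async () => {
      config.daemon1.repo = ipfsPath
      rmrf.sync(config.daemon1.repo)
      rmrf.sync(dbPath)
      ipfsd = await startIpfs(API, config.daemon1)
      ipfs = ipfsd.api
      orbitdb1 = new OrbitDB(ipfs, dbPath + '/1')
      orbitdb2 = new OrbitDB(ipfs, dbPath + '/2')
    })

    after(async () => {
      if (orbitdb1)
        await orbitdb1.stop()

      if (orbitdb2)
        await orbitdb2.stop()

      if (ipfsd)
        await stopIpfs(ipfsd)
    })

    // ...the 'allows multiple peers to write', 'syncs databases', and
    // write-permission error suites from above go here, unchanged...
  })
})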