This commit is contained in:
haad 2023-02-01 11:13:53 +02:00
parent 42a5c2d723
commit 40494246e1
87 changed files with 2707 additions and 665 deletions

View File

@ -22,9 +22,9 @@ const queryLoop = async () => {
console.log('Starting benchmark...')
const identity = await IdentityProvider.createIdentity({ id: 'userA' })
const storage = MemoryStorage()
const storage = await MemoryStorage()
log = Log(identity, { logId: 'A', storage })
log = await Log(identity, { logId: 'A', storage })
// Output metrics at 1 second interval
setInterval(() => {

View File

@ -41,22 +41,6 @@ export default (env, argv) => {
},
module: {
rules: [
// {
// test: /\.m?js$/,
// exclude: /node_modules/,
// use: {
// loader: 'babel-loader',
// options: {
// presets: ['@babel/preset-env'],
// plugins: ['@babel/plugin-syntax-import-assertions']
// }
// }
// },
{
// For inlining the fixture keys in browsers tests
test: /userA|userB|userC|userD|0358df8eb5def772917748fdf8a8b146581ad2041eae48d66cc6865f11783499a6|032f7b6ef0432b572b45fcaf27e7f6757cd4123ff5c5266365bec82129b8c5f214|02a38336e3a47f545a172c9f77674525471ebeda7d6c86140e7a778f67ded92260|03e0480538c2a39951d054e17ff31fde487cb1031d0044a037b53ad2e028a3e77c$/,
loader: 'json-loader'
}
]
}
}

2
dist/ipfslog.min.js vendored

File diff suppressed because one or more lines are too long

View File

@ -95,6 +95,15 @@ export default class Keystore {
return hasKey
}
async addKey (id, key) {
try {
await this._store.put(id, JSON.stringify(key))
} catch (e) {
console.log(e)
}
this._cache.set(id, key)
}
async createKey (id, { entropy } = {}) {
if (!id) {
throw new Error('id needed to create a key')
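The new addKey persists the key through the underlying store and mirrors it into the in-memory cache. A minimal usage sketch, assuming the fixture keys and paths used by the specs later in this commit:
import Keystore from './src/Keystore.js'
import { identityKeys } from './test/fixtures/orbit-db-identity-keys.js'

const keystore = new Keystore('./keys_1')
await keystore.open()
// addKey() writes the serialized key to the store and caches it in memory
for (const [id, key] of Object.entries(identityKeys)) {
  await keystore.addKey(id, key)
}
await keystore.close()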

51
src/composed-storage.js Normal file
View File

@ -0,0 +1,51 @@
const ComposedStorage = async (...storages) => {
const put = async (hash, data) => {
for await (const storage of storages) {
await storage.put(hash, data)
}
}
const get = async (hash) => {
for await (const storage of storages) {
const value = await storage.get(hash)
if (value) {
return value
}
}
}
const iterator = async function * () {
yield * storages[0].iterator()
}
const merge = async (other) => {
// Merge the entries of another storage into every composed storage
if (other) {
for await (const [key, value] of other.iterator()) {
await put(key, value)
}
}
}
const clear = async () => {
for await (const storage of storages) {
await storage.clear()
}
}
const close = async () => {
for await (const storage of storages) {
await storage.close()
}
}
return {
put,
get,
iterator,
merge,
clear,
close
}
}
export default ComposedStorage
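A sketch of how the new ComposedStorage is meant to be stacked, assuming an ipfs instance and an entry's hash/bytes are available: put() fans out to every storage, while get() returns the first hit, so a fast cache can front a slower persistent store.
import ComposedStorage from './src/composed-storage.js'
import LRUStorage from './src/lru-storage.js'
import IPFSBlockStorage from './src/ipfs-block-storage.js'

// LRU cache in front, IPFS block store behind it
const storage = await ComposedStorage(
  await LRUStorage({ size: 1000 }),
  await IPFSBlockStorage({ ipfs, pin: true })
)
await storage.put(hash, bytes) // written to both storages
const value = await storage.get(hash) // served from the LRU when cached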

View File

@ -1,12 +1,26 @@
const Database = async (OpLog, ipfs, identity, id, access, storage) => {
import { Level } from 'level'
import { EventEmitter } from 'events'
const valueEncoding = 'view'
const defaultPointerCount = 64
const Database = async ({ OpLog, ipfs, identity, databaseId, accessController, storage }) => {
const { Log, Entry, IPFSBlockStorage } = OpLog
storage = storage || IPFSBlockStorage(null, { ipfs, timeout: 3000, pin: true })
const log = Log(identity, { logId: id, access, storage })
storage = storage || await IPFSBlockStorage({ ipfs, pin: true })
const path = `./${identity.id}/${databaseId}_state`
const stateStorage = new Level(path, { valueEncoding })
await stateStorage.open()
const log = await Log(identity, { logId: databaseId, access: accessController, storage, stateStorage })
const events = new EventEmitter()
const addOperation = async (op) => {
const entry = await log.append(op, { pointerCount: 8 })
await ipfs.pubsub.publish(id, entry.bytes)
const entry = await log.append(op, { pointerCount: defaultPointerCount })
await ipfs.pubsub.publish(databaseId, entry.bytes)
events.emit('update', entry)
return entry.hash
}
@ -18,27 +32,45 @@ const Database = async (OpLog, ipfs, identity, id, access, storage) => {
await sync(message.data)
}
} catch (e) {
console.error(e)
events.emit('error', e)
// console.error(e)
}
}
const sync = async (bytes) => {
const entry = await Entry.decode(bytes)
events.emit('sync', entry)
await log.joinEntry(entry)
// const updated = await log.joinEntry(entry)
// if (updated) {
events.emit('update', entry)
// }
}
const close = async () => {
await stateStorage.close()
await storage.close()
await ipfs.pubsub.unsubscribe(log.id, handleMessage)
events.emit('close')
}
const drop = async () => {
await stateStorage.clear()
await storage.clear()
}
// Automatically subscribe to the pubsub channel for this database
await ipfs.pubsub.subscribe(log.id, handleMessage)
return {
close,
databaseId,
identity,
sync,
close,
drop,
addOperation,
log
log,
events
}
}
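For reference, a hedged sketch of constructing the refactored Database, which now takes a single options object instead of positional arguments; ipfs, identity and accessController are assumed to be set up as in the specs below.
import * as Log from './src/log.js'
import Database from './src/database.js'

const db = await Database({ OpLog: Log, ipfs, identity, databaseId: 'my-db', accessController })
db.events.on('update', (entry) => console.log('appended', entry.hash))
await db.addOperation({ op: 'ADD', key: null, value: 'hello' }) // resolves to the entry's hash
await db.close()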

39
src/events.js Normal file
View File

@ -0,0 +1,39 @@
const EventStore = async ({ OpLog, Database, ipfs, identity, databaseId, accessController, storage }) => {
const database = await Database({ OpLog, ipfs, identity, databaseId, accessController, storage })
const { addOperation, log } = database
const add = async (value) => {
return addOperation({ op: 'ADD', key: null, value })
}
const get = async (hash) => {
const entry = await log.get(hash)
return entry.payload
}
const iterator = async function * ({ gt, gte, lt, lte, amount } = {}) {
for await (const event of log.iterator({ gt, gte, lt, lte, amount })) {
yield event.payload.value
}
}
const all = async () => {
const values = []
for await (const entry of iterator()) {
values.unshift(entry)
}
return values
}
return {
...database,
type: 'events',
add,
get,
iterator,
all
}
}
export default EventStore
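A usage sketch mirroring the spec added in this commit: the store is append-only, iterator() yields raw payload values newest-first (it traverses from the heads), and all() reverses that into insertion order.
import EventStore from './src/events.js'

const events = await EventStore({ OpLog: Log, Database, ipfs, identity, databaseId: 'events-AAA', accessController })
await events.add('hello')
await events.add('world')
for await (const value of events.iterator()) {
  console.log(value) // 'world', then 'hello'
}
console.log(await events.all()) // ['hello', 'world']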

51
src/feed.js Normal file
View File

@ -0,0 +1,51 @@
const Feed = async ({ OpLog, Database, ipfs, identity, databaseId, accessController, storage }) => {
const database = await Database({ OpLog, ipfs, identity, databaseId, accessController, storage })
const { addOperation, log } = database
const add = async (value) => {
return addOperation({ op: 'ADD', key: null, value })
}
const del = async (hash) => {
return addOperation({ op: 'DEL', key: hash, value: null })
}
const get = async (hash) => {
const entry = await log.get(hash)
return entry.payload
}
const iterator = async function * ({ gt, gte, lt, lte, amount } = {}) {
const deleted = {}
for await (const entry of log.iterator({ gt, gte, lt, lte, amount })) {
const { hash, payload } = entry
const { op, key, value } = payload
if (op === 'ADD' && !deleted[hash]) {
yield value
} else if (op === 'DEL' && !deleted[key]) {
deleted[key] = true
}
}
}
const all = async () => {
const values = []
for await (const entry of iterator()) {
values.unshift(entry)
}
return values
}
return {
...database,
type: 'feed',
add,
del,
get,
iterator,
all
}
}
export default Feed
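The feed behaves like the event store but supports tombstoning entries by hash; a short sketch under the same assumptions as the event store example above:
import Feed from './src/feed.js'

const feed = await Feed({ OpLog: Log, Database, ipfs, identity, databaseId: 'feed-AAA', accessController })
const hash = await feed.add('hello')
await feed.add('friend')
await feed.del(hash) // appends a DEL operation referencing the ADD entry's hash
console.log(await feed.all()) // ['friend'] -- deleted values are skipped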

View File

@ -1,8 +1,12 @@
import { CID } from 'multiformats/cid'
import { base58btc } from 'multiformats/bases/base58'
const IPFSBlockStorage = (next, { ipfs, timeout, pin }) => {
const add = async (hash, data) => {
const defaultTimeout = 30000
const IPFSBlockStorage = async ({ ipfs, timeout, pin }) => {
timeout = timeout || defaultTimeout
const put = async (hash, data) => {
const cid = CID.parse(hash, base58btc)
await ipfs.block.put(data, {
cid: cid.bytes,
@ -12,27 +16,31 @@ const IPFSBlockStorage = (next, { ipfs, timeout, pin }) => {
pin,
timeout
})
if (next) {
return next.add(data)
}
}
const get = async (hash) => {
const cid = CID.parse(hash, base58btc)
const block = await ipfs.block.get(cid, { timeout })
if (block) {
return block
}
if (next) {
return next.get(hash)
}
}
const iterator = async function * () {}
const merge = async (other) => {}
const values = () => ({})
const clear = async () => {}
const close = async () => {}
return {
add,
put,
get,
iterator,
merge,
values
clear,
close
}
}
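IPFSBlockStorage now exposes the same put/get surface as the other storages and applies a default 30-second timeout instead of the old 3 seconds; a sketch, assuming an ipfs instance and an oplog entry:
import IPFSBlockStorage from './src/ipfs-block-storage.js'

const storage = await IPFSBlockStorage({ ipfs, pin: true }) // timeout defaults to 30000 ms
await storage.put(entry.hash, entry.bytes) // the hash is parsed as a base58btc CID
const bytes = await storage.get(entry.hash)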

72
src/kv-persisted.js Normal file
View File

@ -0,0 +1,72 @@
import { Level } from 'level'
const valueEncoding = 'json'
const KeyValuePersisted = async ({ KeyValue, OpLog, Database, ipfs, identity, databaseId, accessController, storage }) => {
const keyValueStore = await KeyValue({ OpLog, Database, ipfs, identity, databaseId, accessController, storage })
const { events, log } = keyValueStore
const path = `./${identity.id}/${databaseId}_index`
const index = new Level(path, { valueEncoding })
await index.open()
let latestOplogHash
const updateIndex = (index) => async (entry) => {
const keys = {}
// Don't shadow `entry`: it records the latest oplog head below
for await (const e of log.iterator({ gt: latestOplogHash })) {
const { op, key, value } = e.payload
if (op === 'PUT' && !keys[key]) {
keys[key] = true
await index.put(key, value)
} else if (op === 'DEL' && !keys[key]) {
keys[key] = true
await index.del(key)
}
}
latestOplogHash = entry.hash
}
const get = async (key) => {
try {
const value = await index.get(key)
if (value) {
return value
}
} catch (e) {
// LEVEL_NOT_FOUND (ie. key not found)
}
return keyValueStore.get(key)
}
const iterator = async function * () {
for await (const [key, value] of index.iterator()) {
yield { key, value }
}
}
// Keep a single handler reference so the same function can be removed again
const onUpdate = updateIndex(index)
const close = async () => {
events.off('update', onUpdate)
await index.close()
await keyValueStore.close()
}
const drop = async () => {
events.off('update', onUpdate)
await index.clear()
await keyValueStore.drop()
}
// Listen for update events from the database and update the index on every update
events.on('update', onUpdate)
return {
...keyValueStore,
get,
iterator,
close,
drop
}
}
export default KeyValuePersisted
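KeyValuePersisted wraps the plain KeyValue store with a Level index that is rebuilt incrementally on every 'update' event, so get() can usually skip a full log traversal. A hedged sketch, with the surrounding instances assumed:
import KeyValue from './src/kv.js'
import KeyValuePersisted from './src/kv-persisted.js'

const kv = await KeyValuePersisted({ KeyValue, OpLog: Log, Database, ipfs, identity, databaseId, accessController })
await kv.put('hello', 'world')
console.log(await kv.get('hello')) // 'world', served from the index when present
await kv.close()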

View File

@ -1,19 +1,14 @@
const KeyValueStore = async (Log, Database, ipfs, identity, databaseId, accessController) => {
const database = await Database(Log, ipfs, identity, databaseId, accessController)
const KeyValue = async ({ OpLog, Database, ipfs, identity, databaseId, accessController, storage }) => {
const database = await Database({ OpLog, ipfs, identity, databaseId, accessController, storage })
const { sync, close, addOperation, log } = database
const { addOperation, log } = database
const all = async function * () {
const keys = {}
for await (const entry of log.traverse()) {
const { op, key, value } = entry.payload
if (op === 'PUT' && !keys[key]) {
keys[key] = true
yield { key, value }
} else if (op === 'DEL' && !keys[key]) {
keys[key] = true
}
}
const put = async (key, value) => {
return addOperation({ op: 'PUT', key, value })
}
const del = async (key) => {
return addOperation({ op: 'DEL', key, value: null })
}
const get = async (key) => {
@ -27,24 +22,28 @@ const KeyValueStore = async (Log, Database, ipfs, identity, databaseId, accessCo
}
}
const put = async (key, value) => {
return addOperation({ op: 'PUT', key, value })
}
const del = async (key) => {
return addOperation({ op: 'DEL', key, value: null })
const iterator = async function * () {
const keys = {}
for await (const entry of log.traverse()) {
const { op, key, value } = entry.payload
if (op === 'PUT' && !keys[key]) {
keys[key] = true
yield { key, value }
} else if (op === 'DEL' && !keys[key]) {
keys[key] = true
}
}
}
return {
...database,
type: 'kv',
put,
set: put, // Alias for put()
get,
del,
all,
sync,
close,
database
get,
iterator
}
}
export default KeyValueStore
export default KeyValue
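A usage sketch of the renamed KeyValue store: get() answers from the log, and the new iterator() yields each key's latest value while skipping deleted keys.
import KeyValue from './src/kv.js'

const kv = await KeyValue({ OpLog: Log, Database, ipfs, identity, databaseId, accessController })
await kv.put('a', 1)
await kv.set('b', 2) // set() is an alias for put()
await kv.del('a')
for await (const { key, value } of kv.iterator()) {
  console.log(key, value) // only: b 2
}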

View File

@ -1,32 +1,63 @@
import { Level } from 'level'
const valueEncoding = 'buffer'
const LevelStorage = async ({ path, valueEncoding } = {}, next) => {
path = path || './level'
// console.log("Path:", path)
const db = new Level(path, { valueEncoding: valueEncoding || 'view', passive: true })
await db.open()
const LevelStorage = (next, { id } = {}) => {
const path = id ? ('./' + id) : './level'
const values = new Level(path)
const add = async (hash, data) => {
await values.put(hash, data, { valueEncoding })
await db.put(hash, data, { valueEncoding })
if (next) {
return next.add(data)
}
}
const get = async (hash) => {
if (await values.get(hash) !== undefined) {
const v = await values.get(hash, { valueEncoding })
return v
const value = await db.get(hash, { valueEncoding })
if (value !== undefined) {
return value
}
if (next) {
return next.get(hash)
}
}
const del = async (hash) => {
await db.del(hash)
if (next) {
return next.del(hash)
}
}
// const values = async () => {
// const res = {}
// for await (const [key, value] of await db.iterator({ valueEncoding }).all()) {
// res[key] = value
// }
// return res
// }
const merge = async (other) => {}
const values_ = () => {}
const clear = async () => {
await db.clear()
}
const close = async () => {
await db.close()
}
return {
add,
get,
del,
// values,
merge,
values: values_
clear,
close
}
}
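A sketch of the reworked LevelStorage, which now opens its Level database up front and defaults to the binary 'view' encoding; the path is an assumption, and note that this storage still exposes add() rather than put() in this commit.
import LevelStorage from './src/level-storage.js'

const storage = await LevelStorage({ path: './level' })
await storage.add(hash, bytes)
console.log(await storage.get(hash))
await storage.close()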

View File

@ -1,25 +1,34 @@
import LRU from 'lru'
import Entry from './entry.js'
import Clock from './lamport-clock.js'
import Sorting from './log-sorting.js'
import AccessController from './default-access-controller.js'
import { isDefined } from './utils/index.js'
import LRU from 'lru'
import IPFSBlockStorage from './ipfs-block-storage.js'
import MemoryStorage from './memory-storage.js'
import LRUStorage from './lru-storage.js'
import LevelStorage from './level-storage.js'
import ComposedStorage from './composed-storage.js'
import { isDefined } from './utils/index.js'
const { LastWriteWins, NoZeroes } = Sorting
const randomId = () => new Date().getTime().toString()
const maxClockTimeReducer = (res, acc) => Math.max(res, acc.clock.time)
// Default storage for storing the Log. Default: In Memory. Options: Memory, LRU, IPFS,
const defaultStorage = MemoryStorage()
// const defaultStorage = LevelStorage()
// const defaultStorage = IPFSBlockStorage(null, { ipfs, timeout, pin: true })
// const defaultStorage = MemoryStorage(IPFSBlockStorage(null, { ipfs, timeout, pin: true }))
// const defaultStorage = LRUStorage()
// Default storage for storing the Log and its entries. Default: Memory. Options: Memory, LRU, IPFS.
const DefaultStorage = MemoryStorage
// const DefaultStorage = LRUStorage
// const DefaultStorage = IPFSBlockStorage
// Default AccessController for the Log.
// Default policy is that anyone can write to the Log.
// Signature of an entry will always be verified regardless of AccessController policy.
// Any object that implements the function `canAppend()` that returns true|false can be
// used as an AccessController.
const DefaultAccessController = async () => {
// An AccessController may do any async initialization stuff here...
return {
canAppend: async (entry, identityProvider) => true
}
}
/**
* @description
@ -43,7 +52,7 @@ const defaultStorage = MemoryStorage()
* @param {Function} options.sortFn The sort function - by default LastWriteWins
* @return {Log} The log instance
*/
const Log = (identity, { logId, logHeads, access, storage, sortFn } = {}) => {
const Log = async (identity, { logId, logHeads, access, storage, stateStorage, sortFn } = {}) => {
if (!isDefined(identity)) {
throw new Error('Identity is required')
}
@ -52,34 +61,46 @@ const Log = (identity, { logId, logHeads, access, storage, sortFn } = {}) => {
}
// Set Log's id
const id = logId || randomId()
// Set heads
// TODO: need to be a LevelStorage()
logHeads = Array.from(new Set(logHeads || []))
// Access Controller
access = access || new AccessController()
access = access || await DefaultAccessController()
// Oplog entry storage
storage = storage || defaultStorage
storage = storage || await DefaultStorage()
// Add heads to the state storage, ie. init the log state
stateStorage = stateStorage || await DefaultStorage()
const newHeads = findHeads(new Set(logHeads || []))
for (const head of newHeads) {
await stateStorage.put(head.hash, true)
}
// Conflict-resolution sorting function
sortFn = NoZeroes(sortFn || LastWriteWins)
/**
* Returns an array of entries
* @returns {Array<Entry>}
*/
const heads = () => {
return logHeads.slice().sort(sortFn).reverse()
}
/**
* Returns the clock of the log.
* @returns {LamportClock}
*/
const clock = () => {
const clock = async () => {
// Find the latest clock from the heads
const maxTime = Math.max(0, heads().reduce(maxClockTimeReducer, 0))
const maxTime = Math.max(0, (await heads()).reduce(maxClockTimeReducer, 0))
return new Clock(identity.publicKey, maxTime)
}
/**
* Returns an array of entries
* @returns {Array<Entry>}
*/
const heads = async () => {
const res = []
for await (const [hash] of stateStorage.iterator()) {
if (hash) {
const entry = await get(hash)
if (entry) {
res.push(entry)
}
}
}
return res.sort(sortFn).reverse()
}
/**
* Returns the values in the log.
* @returns {Promise<Array<Entry>>}
@ -98,7 +119,11 @@ const Log = (identity, { logId, logHeads, access, storage, sortFn } = {}) => {
* @returns {Promise<Entry|undefined>}
*/
const get = async (hash) => {
return storage.get(hash).then(Entry.decode)
const bytes = await storage.get(hash)
if (bytes) {
const entry = await Entry.decode(bytes)
return entry
}
}
/**
@ -110,13 +135,13 @@ const Log = (identity, { logId, logHeads, access, storage, sortFn } = {}) => {
// Get references (entry at every pow2 of distance)
const refs = await getReferences(options.pointerCount)
// Create the next pointers from heads
const nexts = heads().map(entry => entry.hash)
const nexts = (await heads()).map(entry => entry.hash)
// Create the entry
const entry = await Entry.create(
identity,
id,
data,
clock().tick(),
(await clock()).tick(),
nexts,
refs
)
@ -125,10 +150,12 @@ const Log = (identity, { logId, logHeads, access, storage, sortFn } = {}) => {
if (!canAppend) {
throw new Error(`Could not append entry:\nKey "${identity.id}" is not allowed to write to the log`)
}
// The appended entry is now the latest head
logHeads = [entry]
await stateStorage.clear()
await stateStorage.put(entry.hash, true)
// Add entry to the storage
await storage.add(entry.hash, entry.bytes)
await storage.put(entry.hash, entry.bytes)
// Return the appended entry
return entry
}
@ -150,10 +177,13 @@ const Log = (identity, { logId, logHeads, access, storage, sortFn } = {}) => {
if (!isLog(log)) {
throw new Error('Given argument is not an instance of Log')
}
for (const entry of log.heads()) {
const heads = await log.heads()
for (const entry of heads) {
await joinEntry(entry)
}
await storage.merge(log.storage)
if (storage.merge) {
await storage.merge(log.storage)
}
}
/**
@ -170,6 +200,11 @@ const Log = (identity, { logId, logHeads, access, storage, sortFn } = {}) => {
if (entry.id !== id) {
throw new Error(`Entry's id (${entry.id}) doesn't match the log's id (${id}).`)
}
// Return early if entry is already in the current heads
const currentHeads = await heads()
if (currentHeads.find(e => e.hash === entry.hash)) {
return
}
// Verify if entry is allowed to be added to the log
const canAppend = await access.canAppend(entry, identityProvider)
if (!canAppend) {
@ -180,10 +215,15 @@ const Log = (identity, { logId, logHeads, access, storage, sortFn } = {}) => {
if (!isValid) {
throw new Error(`Could not validate signature for entry "${entry.hash}"`)
}
// Find the new heads
logHeads = findHeads(Array.from(new Set([...heads(), entry])))
// Find the new heads and set them as the current state
const newHeads = findHeads(new Set([...currentHeads, entry]))
await stateStorage.clear()
for (const head of newHeads) {
await stateStorage.put(head.hash, true)
}
// Add new entry to storage
await storage.add(entry.hash, entry.bytes)
await storage.put(entry.hash, entry.bytes)
return true
}
/**
@ -195,7 +235,7 @@ const Log = (identity, { logId, logHeads, access, storage, sortFn } = {}) => {
const defaultStopFn = () => false
shouldStopFn = shouldStopFn || defaultStopFn
// Start traversal from given entries or from current heads
rootEntries = rootEntries || heads()
rootEntries = rootEntries || (await heads())
// Sort the given root entries and use them as the starting stack
let stack = rootEntries.sort(sortFn)
// Keep a record of all the hashes of entries we've traversed and yielded
@ -248,7 +288,7 @@ const Log = (identity, { logId, logHeads, access, storage, sortFn } = {}) => {
* @examples
*
* (async () => {
* log = Log(testIdentity, { logId: 'X' })
* log = await Log(testIdentity, { logId: 'X' })
*
* for (let i = 0; i <= 100; i++) {
* await log.append('entry' + i)
@ -266,7 +306,7 @@ const Log = (identity, { logId, logHeads, access, storage, sortFn } = {}) => {
*
*
*/
const iterator = async function * ({ amount = -1, gt, gte, lt, lte }) {
const iterator = async function * ({ amount = -1, gt, gte, lt, lte } = {}) {
// TODO: write comments on how the iterator algorithm works
if (amount === 0) {
@ -286,7 +326,7 @@ const Log = (identity, { logId, logHeads, access, storage, sortFn } = {}) => {
if (isDefined(lt) && !Array.isArray(lt)) throw new Error('lt must be a string or an array of Entries')
if (isDefined(lte) && !Array.isArray(lte)) throw new Error('lte must be a string or an array of Entries')
const start = (lt || (lte || heads())).filter(isDefined)
const start = (lt || (lte || await heads())).filter(isDefined)
const end = (gt || gte) ? await get(gt || gte) : null
const amountToIterate = end || amount === -1
@ -339,33 +379,6 @@ const Log = (identity, { logId, logHeads, access, storage, sortFn } = {}) => {
}
}
/**
* Find heads from a collection of entries.
*
* Finds entries that are the heads of this collection,
* ie. entries that are not referenced by other entries.
*
* @param {Array<Entry>} entries Entries to search heads from
* @returns {Array<Entry>}
*/
const findHeads = (entries) => {
const items = {}
for (const entry of entries) {
for (const next of entry.next) {
items[next] = entry.hash
}
}
const res = []
for (const entry of entries) {
if (!items[entry.hash]) {
res.push(entry)
}
}
return res
}
/**
* TODO
* Get references at every pow2 distance
@ -423,8 +436,37 @@ const Log = (identity, { logId, logHeads, access, storage, sortFn } = {}) => {
}
}
/**
* Find heads from a collection of entries.
*
* Finds entries that are the heads of this collection,
* ie. entries that are not referenced by other entries.
*
* This function is private and not exposed in the Log API
*
* @param {Array<Entry>} entries Entries to search heads from
* @returns {Array<Entry>}
*/
const findHeads = (entries) => {
const items = {}
for (const entry of entries) {
for (const next of entry.next) {
items[next] = entry.hash
}
}
const res = []
for (const entry of entries) {
if (!items[entry.hash]) {
res.push(entry)
}
}
return res
}
export { Log }
export { Sorting }
export { Entry }
export { AccessController }
export { IPFSBlockStorage, MemoryStorage, LRUStorage, LevelStorage }
export { DefaultAccessController }
export { IPFSBlockStorage, MemoryStorage, LRUStorage, ComposedStorage }
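Pulling the log.js changes together: Log() is now async, the heads live in a pluggable state storage, and heads() and clock() are therefore async as well. A minimal sketch, assuming an identity created with orbit-db-identity-provider as in the benchmarks:
import { Log, MemoryStorage } from './src/log.js'

const log = await Log(identity, { logId: 'A', storage: await MemoryStorage() })
await log.append('hello')
const heads = await log.heads() // read back from stateStorage
const clock = await log.clock() // derived from the (now async) heads
console.log(heads.length, clock.time)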

View File

@ -2,41 +2,47 @@ import LRU from 'lru'
const defaultSize = 1000000
const LRUStorage = (next, { size } = {}) => {
const values = new LRU(size || defaultSize)
const add = async (hash, data) => {
values.set(hash, data)
if (next) {
return next.add(data)
}
const LRUStorage = async ({ size } = {}) => {
let lru = new LRU(size || defaultSize)
const put = async (hash, data) => {
lru.set(hash, data)
}
const get = async (hash) => {
if (values.peek(hash)) {
return values.get(hash)
}
if (next) {
return next.get(hash)
if (lru.peek(hash)) {
return lru.get(hash)
}
}
const iterator = async function * () {
for await (const key of lru.keys) {
const value = lru.get(key)
yield [key, value]
}
}
const merge = async (other) => {
if (other) {
Object.keys(other.values()).forEach(k => {
const value = other.get(k)
values.set(k, value)
})
for await (const [key, value] of other.iterator()) {
lru.set(key, value)
}
}
}
const values_ = () => (
values.keys.reduce((res, key) => {
res[key] = values.get(key)
return res
}, {})
)
const clear = async () => {
lru = new LRU(size || defaultSize)
}
const close = async () => {}
return {
add,
put,
get,
iterator,
merge,
values: values_
clear,
close
}
}
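The LRU-backed storage caps how many entries it keeps, and clear() simply swaps in a fresh LRU; a small sketch with an assumed cache size:
import LRUStorage from './src/lru-storage.js'

const lru = await LRUStorage({ size: 2 })
await lru.put('a', 1)
await lru.put('b', 2)
await lru.put('c', 3) // capacity 2: the least-recently-used key ('a') is evicted
console.log(await lru.get('a')) // undefined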

View File

@ -1,27 +1,41 @@
const MemoryStorage = (next) => {
let values = {}
const add = async (hash, data) => {
values[hash] = data
if (next) {
return next.add(data)
}
const MemoryStorage = async () => {
let memory = {}
const put = async (hash, data) => {
memory[hash] = data
}
const get = async (hash) => {
if (values[hash]) {
return values[hash]
}
if (next) {
return next.get(hash)
if (memory[hash]) {
return memory[hash]
}
}
const iterator = async function * () {
for await (const [key, value] of Object.entries(memory)) {
yield [key, value]
}
}
const merge = async (other) => {
values = Object.assign({}, values, other ? other.values() : {})
if (other) {
for await (const [key, value] of other.iterator()) {
await put(key, value)
}
}
}
const clear = async () => (memory = {})
const close = async () => {}
return {
add,
put,
get,
iterator,
merge,
values: () => values
clear,
close
}
}
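All storages in this commit now share the same surface (put, get, iterator, merge, clear, close), which is what lets ComposedStorage treat them interchangeably. For example, merging one MemoryStorage into another:
import MemoryStorage from './src/memory-storage.js'

const a = await MemoryStorage()
const b = await MemoryStorage()
await a.put('k1', 'v1')
await b.put('k2', 'v2')
await a.merge(b) // copies b's entries into a via b.iterator()
for await (const [key, value] of a.iterator()) {
  console.log(key, value) // k1 v1, then k2 v2
}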

284
test/events.spec.js Normal file
View File

@ -0,0 +1,284 @@
import { deepStrictEqual, strictEqual, notStrictEqual } from 'assert'
import rimraf from 'rimraf'
import * as Log from '../src/log.js'
import IdentityProvider from 'orbit-db-identity-provider'
import Keystore from '../src/Keystore.js'
import EventStore from '../src/events.js'
import Database from '../src/database.js'
// Test utils
import { config, testAPIs, startIpfs, stopIpfs, getIpfsPeerId, connectPeers, waitForPeers } from 'orbit-db-test-utils'
import { identityKeys, signingKeys } from './fixtures/orbit-db-identity-keys.js'
const { sync: rmrf } = rimraf
const { createIdentity } = IdentityProvider
Object.keys(testAPIs).forEach((IPFS) => {
describe('Events Database (' + IPFS + ')', function () {
this.timeout(config.timeout * 2)
let ipfsd1, ipfsd2
let ipfs1, ipfs2
let keystore, signingKeystore
let peerId1, peerId2
let testIdentity1, testIdentity2
let kv1, kv2
const databaseId = 'events-AAA'
before(async () => {
// Start two IPFS instances
ipfsd1 = await startIpfs(IPFS, config.daemon1)
ipfsd2 = await startIpfs(IPFS, config.daemon2)
ipfs1 = ipfsd1.api
ipfs2 = ipfsd2.api
await connectPeers(ipfs1, ipfs2)
// Get the peer IDs
peerId1 = await getIpfsPeerId(ipfs1)
peerId2 = await getIpfsPeerId(ipfs2)
keystore = new Keystore('./keys_1')
await keystore.open()
for (const [key, value] of Object.entries(identityKeys)) {
await keystore.addKey(key, value)
}
signingKeystore = new Keystore('./keys_2')
await signingKeystore.open()
for (const [key, value] of Object.entries(signingKeys)) {
await signingKeystore.addKey(key, value)
}
// Create an identity for each peer
testIdentity1 = await createIdentity({ id: 'userA', keystore, signingKeystore })
testIdentity2 = await createIdentity({ id: 'userB', keystore, signingKeystore })
})
afterEach(async () => {
if (kv1) {
await kv1.close()
}
if (kv2) {
await kv2.close()
}
})
after(async () => {
if (ipfsd1) {
await stopIpfs(ipfsd1)
}
if (ipfsd2) {
await stopIpfs(ipfsd2)
}
if (keystore) {
await keystore.close()
}
if (signingKeystore) {
await signingKeystore.close()
}
if (testIdentity1) {
rmrf(testIdentity1.id)
}
if (testIdentity2) {
rmrf(testIdentity2.id)
}
rmrf('./orbitdb')
rmrf('./keys_1')
rmrf('./keys_2')
})
describe('using database', () => {
it.skip('returns all entries in the database', async () => {
let error
let updateCount = 0
const accessController = {
canAppend: (entry) => entry.identity.id === testIdentity1.id
}
const onUpdate = (entry) => {
updateCount++
}
const onError = (err) => {
error = err
}
kv1 = await EventStore({ OpLog: Log, Database, ipfs: ipfs1, identity: testIdentity1, databaseId, accessController })
kv2 = await EventStore({ OpLog: Log, Database, ipfs: ipfs2, identity: testIdentity2, databaseId, accessController })
kv1.events.on('update', onUpdate)
kv2.events.on('update', onUpdate)
kv1.events.on('error', onError)
kv2.events.on('error', onError)
strictEqual(kv1.type, 'events')
strictEqual(kv2.type, 'events')
// await waitForPeers(ipfs1, [peerId2], databaseId)
// await waitForPeers(ipfs2, [peerId1], databaseId)
// send a garbage message to pubsub to test onError firing
// await ipfs1.pubsub.publish(databaseId, Uint8Array.from([1, 2, 3, 4, 5]))
await kv1.add('init')
await kv1.add(true)
await kv1.add('hello')
await kv1.add('friend')
await kv1.add(12345)
await kv1.add('empty')
await kv1.add('')
const hash = await kv1.add('friend33')
// await kv1.set('init', true)
// await kv1.set('hello', 'friend')
// await kv1.del('hello')
// await kv1.set('hello', 'friend2')
// await kv1.del('hello')
// await kv1.set('empty', '')
// await kv1.del('empty')
// const hash = await kv1.set('hello', 'friend3')
const lastEntry = await kv1.get(hash)
// const sleep = (time) => new Promise((resolve) => {
// setTimeout(() => {
// resolve()
// }, time)
// })
// await sleep(5000) // give some time for ipfs peers to sync
// sync() test
console.time('sync')
await kv2.sync(lastEntry.bytes)
console.timeEnd('sync')
// await sleep(1000) // give some time for ipfs peers to sync
// // write access test
let errorMessage
try {
await kv2.set('hello', 'friend4')
} catch (e) {
errorMessage = e.message
} finally {
const valueNotUpdated = await kv2.get('hello')
strictEqual(valueNotUpdated, 'friend3')
notStrictEqual(errorMessage, undefined)
strictEqual(errorMessage.startsWith('Could not append entry:\nKey'), true)
}
// all() test
const all2 = []
console.time('all2')
for await (const event of kv2.iterator()) {
all2.unshift(event)
}
console.timeEnd('all2')
deepStrictEqual(all2, [
'init',
true,
'hello',
'friend',
12345,
'empty',
'',
'friend33'
])
const all1 = await kv2.all()
deepStrictEqual(all1, [
'init',
true,
'hello',
'friend',
12345,
'empty',
'',
'friend33'
])
// onError test
notStrictEqual(error, undefined)
strictEqual(error.message, 'CBOR decode error: too many terminals, data makes no sense')
// onUpdate test
strictEqual(updateCount, 8 * 2)
})
})
describe('load database', () => {
it('returns all entries in the database', async () => {
const accessController = {
canAppend: (entry) => entry.identity.id === testIdentity1.id
}
kv1 = await EventStore({ OpLog: Log, Database, ipfs: ipfs1, identity: testIdentity1, databaseId, accessController })
kv2 = await EventStore({ OpLog: Log, Database, ipfs: ipfs2, identity: testIdentity2, databaseId, accessController })
await waitForPeers(ipfs1, [peerId2], databaseId)
await waitForPeers(ipfs2, [peerId1], databaseId)
await kv1.add('init')
await kv1.add(true)
await kv1.add('hello')
await kv1.add('friend')
await kv1.add(12345)
await kv1.add('empty')
await kv1.add('')
await kv1.add('friend33')
// const hash = await kv1.add('friend33')
// const lastEntry = await kv1.log.get(hash)
const sleep = (time) => new Promise((resolve) => {
setTimeout(() => {
resolve()
}, time)
})
await sleep(10000) // give some time for ipfs peers to sync
// sync() test
// console.time('sync')
// await kv2.sync(lastEntry.bytes)
// console.timeEnd('sync')
await kv1.close()
await kv2.close()
// await sleep(1000) // give some time for ipfs peers to sync
kv1 = await EventStore({ OpLog: Log, Database, ipfs: ipfs1, identity: testIdentity1, databaseId, accessController })
kv2 = await EventStore({ OpLog: Log, Database, ipfs: ipfs2, identity: testIdentity2, databaseId, accessController })
// all() test
const all2 = []
console.time('all2')
for await (const event of kv2.iterator()) {
all2.unshift(event)
}
console.timeEnd('all2')
deepStrictEqual(all2, [
'init',
true,
'hello',
'friend',
12345,
'empty',
'',
'friend33'
])
const all1 = await kv2.all()
deepStrictEqual(all1, [
'init',
true,
'hello',
'friend',
12345,
'empty',
'',
'friend33'
])
})
})
})
})

274
test/feed.spec.js Normal file
View File

@ -0,0 +1,274 @@
import { deepStrictEqual, strictEqual, notStrictEqual } from 'assert'
import rimraf from 'rimraf'
import * as Log from '../src/log.js'
import IdentityProvider from 'orbit-db-identity-provider'
import Keystore from '../src/Keystore.js'
import Feed from '../src/feed.js'
import Database from '../src/database.js'
// Test utils
import { config, testAPIs, getIpfsPeerId, waitForPeers, startIpfs, stopIpfs, connectPeers } from 'orbit-db-test-utils'
import { identityKeys, signingKeys } from './fixtures/orbit-db-identity-keys.js'
const { sync: rmrf } = rimraf
const { createIdentity } = IdentityProvider
Object.keys(testAPIs).forEach((IPFS) => {
describe('Feed Database (' + IPFS + ')', function () {
this.timeout(config.timeout)
let ipfsd1, ipfsd2
let ipfs1, ipfs2
let keystore, signingKeystore
let peerId1, peerId2
let testIdentity1, testIdentity2
let kv1, kv2
const databaseId = 'feed-AAA'
before(async () => {
// Start two IPFS instances
ipfsd1 = await startIpfs(IPFS, config.daemon1)
ipfsd2 = await startIpfs(IPFS, config.daemon2)
ipfs1 = ipfsd1.api
ipfs2 = ipfsd2.api
await connectPeers(ipfs1, ipfs2)
// Get the peer IDs
peerId1 = await getIpfsPeerId(ipfs1)
peerId2 = await getIpfsPeerId(ipfs2)
keystore = new Keystore('./keys_1')
await keystore.open()
for (const [key, value] of Object.entries(identityKeys)) {
await keystore.addKey(key, value)
}
signingKeystore = new Keystore('./keys_2')
await signingKeystore.open()
for (const [key, value] of Object.entries(signingKeys)) {
await signingKeystore.addKey(key, value)
}
// Create an identity for each peer
testIdentity1 = await createIdentity({ id: 'userA', keystore, signingKeystore })
testIdentity2 = await createIdentity({ id: 'userB', keystore, signingKeystore })
})
afterEach(async () => {
if (kv1) {
await kv1.close()
}
if (kv2) {
await kv2.close()
}
})
after(async () => {
if (ipfsd1) {
await stopIpfs(ipfsd1)
}
if (ipfsd2) {
await stopIpfs(ipfsd2)
}
if (keystore) {
await keystore.close()
}
if (signingKeystore) {
await signingKeystore.close()
}
if (testIdentity1) {
rmrf(testIdentity1.id)
}
if (testIdentity2) {
rmrf(testIdentity2.id)
}
rmrf('./orbitdb')
rmrf('./keys_1')
rmrf('./keys_2')
})
describe('using database', () => {
it.skip('returns all entries in the database', async () => {
let error
let updateCount = 0
const accessController = {
canAppend: (entry) => entry.identity.id === testIdentity1.id
}
const onUpdate = (entry) => {
updateCount++
}
const onError = (err) => {
error = err
}
kv1 = await Feed({ OpLog: Log, Database, ipfs: ipfs1, identity: testIdentity1, databaseId, accessController })
kv2 = await Feed({ OpLog: Log, Database, ipfs: ipfs2, identity: testIdentity2, databaseId, accessController })
kv1.events.on('update', onUpdate)
kv2.events.on('update', onUpdate)
kv1.events.on('error', onError)
kv2.events.on('error', onError)
strictEqual(kv1.type, 'feed')
strictEqual(kv2.type, 'feed')
await waitForPeers(ipfs1, [peerId2], databaseId)
await waitForPeers(ipfs2, [peerId1], databaseId)
// send a garbage message to pubsub to test onError firing
// await ipfs1.pubsub.publish(databaseId, Uint8Array.from([1, 2, 3, 4, 5]))
await kv1.add('init')
await kv1.add(true)
await kv1.add('hello')
await kv1.add('friend')
await kv1.add(12345)
await kv1.add('empty')
await kv1.add('')
await kv1.add('friend33')
// const hash = await kv1.add('friend33')
// const lastEntry = await kv1.get(hash)
const sleep = (time) => new Promise((resolve) => {
setTimeout(() => {
resolve()
}, time)
})
await sleep(10000) // give some time for ipfs peers to sync
// // sync() test
// console.time('sync')
// await kv2.sync(lastEntry.bytes)
// console.timeEnd('sync')
// await sleep(1000) // give some time for ipfs peers to sync
// // write access test
// let errorMessage
// try {
// await kv2.set('hello', 'friend4')
// } catch (e) {
// errorMessage = e.message
// } finally {
// const valueNotUpdated = await kv2.get('hello')
// strictEqual(valueNotUpdated, 'friend3')
// notStrictEqual(errorMessage, undefined)
// strictEqual(errorMessage.startsWith('Could not append entry:\nKey'), true)
// }
// all() test
const all2 = []
console.time('all2')
for await (const event of kv2.iterator()) {
all2.unshift(event)
}
console.timeEnd('all2')
deepStrictEqual(all2, [
'init',
true,
'hello',
'friend',
12345,
'empty',
'',
'friend33'
])
const all1 = await kv2.all()
deepStrictEqual(all1, [
'init',
true,
'hello',
'friend',
12345,
'empty',
'',
'friend33'
])
// onError test
notStrictEqual(error, undefined)
strictEqual(error.message, 'CBOR decode error: too many terminals, data makes no sense')
// onUpdate test
strictEqual(updateCount, 8 * 2)
})
})
describe('load database', () => {
it('returns all entries in the database', async () => {
const accessController = {
canAppend: (entry) => entry.identity.id === testIdentity1.id
}
kv1 = await Feed({ OpLog: Log, Database, ipfs: ipfs1, identity: testIdentity1, databaseId, accessController })
kv2 = await Feed({ OpLog: Log, Database, ipfs: ipfs2, identity: testIdentity2, databaseId, accessController })
await waitForPeers(ipfs1, [peerId2], databaseId)
await waitForPeers(ipfs2, [peerId1], databaseId)
await kv1.add('init')
const hashA = await kv1.add(true)
await kv1.add('hello')
await kv1.add('friend')
await kv1.add(12345)
await kv1.del(hashA)
const hashB = await kv1.add('empty')
await kv1.add('')
const hash = await kv1.add('friend33')
await kv1.del(hashB)
await kv1.del(hash)
// const hashX = await kv1.del(hash)
// const lastEntry = await kv1.log.get(hashX)
const sleep = (time) => new Promise((resolve) => {
setTimeout(() => {
resolve()
}, time)
})
await sleep(5000) // give some time for ipfs peers to sync
// sync() test
// console.time('sync')
// await kv2.sync(lastEntry.bytes)
// console.timeEnd('sync')
await kv1.close()
await kv2.close()
// await sleep(1000) // give some time for ipfs peers to sync
kv1 = await Feed({ OpLog: Log, Database, ipfs: ipfs1, identity: testIdentity1, databaseId, accessController })
kv2 = await Feed({ OpLog: Log, Database, ipfs: ipfs2, identity: testIdentity2, databaseId, accessController })
// all() test
const all2 = []
console.time('all2')
for await (const event of kv2.iterator()) {
all2.unshift(event)
}
console.timeEnd('all2')
deepStrictEqual(all2, [
'init',
'hello',
'friend',
12345,
''
])
const all1 = await kv2.all()
deepStrictEqual(all1, [
'init',
'hello',
'friend',
12345,
''
])
})
})
})
})

View File

@ -0,0 +1,8 @@
Come hang out in our IRC chat room if you have any questions.
Contact the ipfs dev team:
- Bugs: https://github.com/ipfs/go-ipfs/issues
- Help: irc.freenode.org/#ipfs
- Email: dev@ipfs.io

View File

@ -0,0 +1,9 @@
Some helpful resources for finding your way around ipfs:
- quick-start: a quick show of various ipfs features.
- ipfs commands: a list of all commands
- ipfs --help: every command describes itself
- https://github.com/ipfs/go-ipfs -- the src repository
- #ipfs on irc.freenode.org -- the community irc channel

View File

@ -0,0 +1,115 @@
# 0.1 - Quick Start
This is a set of short examples with minimal explanation. It is meant as
a "quick start". Soon, we'll write a longer tour :-)
Add a file to ipfs:
echo "hello world" >hello
ipfs add hello
View it:
ipfs cat <the-hash-you-got-here>
Try a directory:
mkdir foo
mkdir foo/bar
echo "baz" > foo/baz
echo "baz" > foo/bar/baz
ipfs add -r foo
View things:
ipfs ls <the-hash-here>
ipfs ls <the-hash-here>/bar
ipfs cat <the-hash-here>/baz
ipfs cat <the-hash-here>/bar/baz
ipfs cat <the-hash-here>/bar
ipfs ls <the-hash-here>/baz
References:
ipfs refs <the-hash-here>
ipfs refs -r <the-hash-here>
ipfs refs --help
Get:
ipfs get <the-hash-here> -o foo2
diff foo foo2
Objects:
ipfs object get <the-hash-here>
ipfs object get <the-hash-here>/foo2
ipfs object --help
Pin + GC:
ipfs pin add <the-hash-here>
ipfs repo gc
ipfs ls <the-hash-here>
ipfs pin rm <the-hash-here>
ipfs repo gc
Daemon:
ipfs daemon (in another terminal)
ipfs id
Network:
(must be online)
ipfs swarm peers
ipfs id
ipfs cat <hash-of-remote-object>
Mount:
(warning: fuse is finicky!)
ipfs mount
cd /ipfs/<the-hash-here>
ls
Tool:
ipfs version
ipfs update
ipfs commands
ipfs config --help
open http://localhost:5001/webui
Browse:
webui:
http://localhost:5001/webui
video:
http://localhost:8080/ipfs/QmVc6zuAneKJzicnJpfrqCH9gSy6bz54JhcypfJYhGUFQu/play#/ipfs/QmTKZgRNwDNZwHtJSjCp6r5FYefzpULfy37JvMt9DwvXse
images:
http://localhost:8080/ipfs/QmZpc3HvfjEXvLWGQPWbHk3AjD5j8NEN4gmFN8Jmrd5g83/cs
markdown renderer app:
http://localhost:8080/ipfs/QmX7M9CiYXjVeFnkfVGf3y5ixTZ2ACeSGyL1vBJY1HvQPp/mdown

View File

@ -0,0 +1,27 @@
IPFS Alpha Security Notes
We try hard to ensure our system is safe and robust, but all software
has bugs, especially new software. This distribution is meant to be an
alpha preview, don't use it for anything mission critical.
Please note the following:
- This is alpha software and has not been audited. It is our goal
to conduct a proper security audit once we close in on a 1.0 release.
- ipfs is a networked program, and may have serious undiscovered
vulnerabilities. It is written in Go, and we do not execute any
user provided data. But please point any problems out to us in a
github issue, or email security@ipfs.io privately.
- security@ipfs.io GPG key:
- 4B9665FB 92636D17 7C7A86D3 50AAE8A9 59B13AF3
- https://pgp.mit.edu/pks/lookup?op=get&search=0x50AAE8A959B13AF3
- ipfs uses encryption for all communication, but it's NOT PROVEN SECURE
YET! It may be totally broken. For now, the code is included to make
sure we benchmark our operations with encryption in mind. In the future,
there will be an "unsafe" mode for high performance intranet apps.
If this is a blocking feature for you, please contact us.

View File

Binary file not shown.


View File

@ -0,0 +1,3 @@
Index


View File

@ -0,0 +1,36 @@
WIP
# 0.0 - Introduction
Welcome to IPFS! This tour will guide you through a few of the
features of this tool, and the most common commands. Then, it will
immerse you into the world of merkledags and the amazing things
you can do with them.
This tour has many parts, and can be taken in different sequences.
Different people learn different ways, so choose your own adventure:
To start with the concepts, try:
- The Merkle DAG
- Data Structures on the Merkle DAG
- Representing Files with unixfs
- add, cat, ls, refs
...
To start with the examples, try:
- add, cat, ls, refs
- Representing Files with unixfs
- Data Structures on the Merkle DAG
- The Merkle DAG
...
To start with the network, try:
- IPFS Nodes
- Running the daemon
- The Swarm
- The Web

View File

@ -0,0 +1,28 @@
Hello and Welcome to IPFS!
██╗██████╗ ███████╗███████╗
██║██╔══██╗██╔════╝██╔════╝
██║██████╔╝█████╗ ███████╗
██║██╔═══╝ ██╔══╝ ╚════██║
██║██║ ██║ ███████║
╚═╝╚═╝ ╚═╝ ╚══════╝
If you're seeing this, you have successfully installed
IPFS and are now interfacing with the ipfs merkledag!
-------------------------------------------------------
| Warning: |
| This is alpha software. Use at your own discretion! |
| Much is missing or lacking polish. There are bugs. |
| Not yet secure. Read the security notes for more. |
-------------------------------------------------------
Check out some of the other files in this directory:
./about
./help
./quick-start <-- usage examples
./readme <-- this file
./security-notes

View File

Binary file not shown.


View File

@ -0,0 +1 @@
/repo/flatfs/shard/v1/next-to-last/2

View File

@ -0,0 +1,54 @@
IPFS -- Inter-Planetary File system
IPFS is a global, versioned, peer-to-peer filesystem. It combines good ideas
from Git, BitTorrent, Kademlia, SFS, and the Web. It is like a single bit-
torrent swarm, exchanging git objects. IPFS provides an interface as simple
as the HTTP web, but with permanence built in. You can also mount the world
at /ipfs.
IPFS is a protocol:
- defines a content-addressed file system
- coordinates content delivery
- combines Kademlia + BitTorrent + Git
IPFS is a filesystem:
- has directories and files
- mountable filesystem (via FUSE)
IPFS is a web:
- can be used to view documents like the web
- files accessible via HTTP at 'http://ipfs.io/<path>'
- browsers or extensions can learn to use 'ipfs://' directly
- hash-addressed content guarantees authenticity
IPFS is modular:
- connection layer over any network protocol
- routing layer
- uses a routing layer DHT (kademlia/coral)
- uses a path-based naming service
- uses bittorrent-inspired block exchange
IPFS uses crypto:
- cryptographic-hash content addressing
- block-level deduplication
- file integrity + versioning
- filesystem-level encryption + signing support
IPFS is p2p:
- worldwide peer-to-peer file transfers
- completely decentralized architecture
- **no** central point of failure
IPFS is a cdn:
- add a file to the filesystem locally, and it's now available to the world
- caching-friendly (content-hash naming)
- bittorrent-based bandwidth distribution
IPFS has a name service:
- IPNS, an SFS inspired name system
- global namespace based on PKI
- serves to build trust chains
- compatible with other NSes
- can map DNS, .onion, .bit, etc to IPNS

22
test/fixtures/ipfs1/blocks/_README.data vendored Normal file
View File

@ -0,0 +1,22 @@
This is a repository of IPLD objects. Each IPLD object is in a single file,
named <base32 encoding of cid>.data. Where <base32 encoding of cid> is the
"base32" encoding of the CID (as specified in
https://github.com/multiformats/multibase) without the 'B' prefix.
All the object files are placed in a tree of directories, based on a
function of the CID. This is a form of sharding similar to
the objects directory in git repositories. Previously, we used
prefixes, we now use the next-to-last two characters.
func NextToLast(base32cid string) string {
nextToLastLen := 2
offset := len(base32cid) - nextToLastLen - 1
return base32cid[offset : offset+nextToLastLen]
}
For example, an object with a base58 CIDv1 of
zb2rhYSxw4ZjuzgCnWSt19Q94ERaeFhu9uSqRgjSdx9bsgM6f
has a base32 CIDv1 of
BAFKREIA22FLID5AJ2KU7URG47MDLROZIH6YF2KALU2PWEFPVI37YLKRSCA
and will be placed at
SC/AFKREIA22FLID5AJ2KU7URG47MDLROZIH6YF2KALU2PWEFPVI37YLKRSCA.data
with 'SC' being the next-to-last two characters and the 'B' at the
beginning of the CIDv1 string is the multibase prefix that is not
stored in the filename.

82
test/fixtures/ipfs1/config vendored Normal file
View File

@ -0,0 +1,82 @@
{
"Addresses": {
"Swarm": [
"/ip4/0.0.0.0/tcp/0"
],
"Announce": [],
"NoAnnounce": [],
"API": "/ip4/127.0.0.1/tcp/0",
"Gateway": "/ip4/0.0.0.0/tcp/0",
"RPC": "/ip4/127.0.0.1/tcp/5003",
"Delegates": [
"/dns4/node0.delegate.ipfs.io/tcp/443/https",
"/dns4/node1.delegate.ipfs.io/tcp/443/https",
"/dns4/node2.delegate.ipfs.io/tcp/443/https",
"/dns4/node3.delegate.ipfs.io/tcp/443/https"
]
},
"Discovery": {
"MDNS": {
"Enabled": true,
"Interval": 0
},
"webRTCStar": {
"Enabled": false
}
},
"Bootstrap": [],
"Pubsub": {
"Router": "gossipsub",
"Enabled": true
},
"Swarm": {
"ConnMgr": {
"LowWater": 50,
"HighWater": 200
},
"DisableNatPortMap": false
},
"Routing": {
"Type": "dhtclient"
},
"Identity": {
"PeerID": "12D3KooWK49VRnddoBhSDsYcwuzeje5XRJ9ZT3r2g5QdLzJbHQwi",
"PrivKey": "CAESQBdBLRS/jdSCzti7eJ1aS1khEYTuZgIuOhFyg+2eqfBNiUD6j6fuMapKVqFyuQtQzHXrAEd9bCfZUfhe82wxBqE="
},
"Datastore": {
"Spec": {
"type": "mount",
"mounts": [
{
"mountpoint": "/blocks",
"type": "measure",
"prefix": "flatfs.datastore",
"child": {
"type": "flatfs",
"path": "blocks",
"sync": true,
"shardFunc": "/repo/flatfs/shard/v1/next-to-last/2"
}
},
{
"mountpoint": "/",
"type": "measure",
"prefix": "leveldb.datastore",
"child": {
"type": "levelds",
"path": "datastore",
"compression": "none"
}
}
]
}
},
"Keychain": {
"DEK": {
"keyLength": 64,
"iterationCount": 10000,
"salt": "A2lYMmv0i8H3PlH7ejbma30G",
"hash": "sha2-512"
}
}
}

BIN
test/fixtures/ipfs1/datastore/000003.log vendored Normal file

Binary file not shown.

1
test/fixtures/ipfs1/datastore/CURRENT vendored Normal file
View File

@ -0,0 +1 @@
MANIFEST-000002

0
test/fixtures/ipfs1/datastore/LOCK vendored Normal file
View File

1
test/fixtures/ipfs1/datastore/LOG vendored Normal file
View File

@ -0,0 +1 @@
2023/02/01-11:13:27.065492 171997000 Delete type=3 #1

Binary file not shown.

1
test/fixtures/ipfs1/datastore_spec vendored Normal file
View File

@ -0,0 +1 @@
{"mounts":[{"mountpoint":"/blocks","path":"blocks","shardFunc":"/repo/flatfs/shard/v1/next-to-last/2","type":"flatfs"},{"mountpoint":"/","path":"datastore","type":"levelds"}],"type":"mount"}

BIN
test/fixtures/ipfs1/pins/000003.log vendored Normal file

Binary file not shown.

1
test/fixtures/ipfs1/pins/CURRENT vendored Normal file
View File

@ -0,0 +1 @@
MANIFEST-000002

0
test/fixtures/ipfs1/pins/LOCK vendored Normal file
View File

1
test/fixtures/ipfs1/pins/LOG vendored Normal file
View File

@ -0,0 +1 @@
2023/02/01-11:13:27.065506 170987000 Delete type=3 #1

BIN
test/fixtures/ipfs1/pins/MANIFEST-000002 vendored Normal file

Binary file not shown.

1
test/fixtures/ipfs1/version vendored Normal file
View File

@ -0,0 +1 @@
12

View File

@ -0,0 +1,8 @@
Come hang out in our IRC chat room if you have any questions.
Contact the ipfs dev team:
- Bugs: https://github.com/ipfs/go-ipfs/issues
- Help: irc.freenode.org/#ipfs
- Email: dev@ipfs.io

View File

@ -0,0 +1,9 @@
Some helpful resources for finding your way around ipfs:
- quick-start: a quick show of various ipfs features.
- ipfs commands: a list of all commands
- ipfs --help: every command describes itself
- https://github.com/ipfs/go-ipfs -- the src repository
- #ipfs on irc.freenode.org -- the community irc channel

View File

@ -0,0 +1,115 @@
# 0.1 - Quick Start
This is a set of short examples with minimal explanation. It is meant as
a "quick start". Soon, we'll write a longer tour :-)
Add a file to ipfs:
echo "hello world" >hello
ipfs add hello
View it:
ipfs cat <the-hash-you-got-here>
Try a directory:
mkdir foo
mkdir foo/bar
echo "baz" > foo/baz
echo "baz" > foo/bar/baz
ipfs add -r foo
View things:
ipfs ls <the-hash-here>
ipfs ls <the-hash-here>/bar
ipfs cat <the-hash-here>/baz
ipfs cat <the-hash-here>/bar/baz
ipfs cat <the-hash-here>/bar
ipfs ls <the-hash-here>/baz
References:
ipfs refs <the-hash-here>
ipfs refs -r <the-hash-here>
ipfs refs --help
Get:
ipfs get <the-hash-here> -o foo2
diff foo foo2
Objects:
ipfs object get <the-hash-here>
ipfs object get <the-hash-here>/foo2
ipfs object --help
Pin + GC:
ipfs pin add <the-hash-here>
ipfs repo gc
ipfs ls <the-hash-here>
ipfs pin rm <the-hash-here>
ipfs repo gc
Daemon:
ipfs daemon (in another terminal)
ipfs id
Network:
(must be online)
ipfs swarm peers
ipfs id
ipfs cat <hash-of-remote-object>
Mount:
(warning: fuse is finicky!)
ipfs mount
cd /ipfs/<the-hash-here>
ls
Tool:
ipfs version
ipfs update
ipfs commands
ipfs config --help
open http://localhost:5001/webui
Browse:
webui:
http://localhost:5001/webui
video:
http://localhost:8080/ipfs/QmVc6zuAneKJzicnJpfrqCH9gSy6bz54JhcypfJYhGUFQu/play#/ipfs/QmTKZgRNwDNZwHtJSjCp6r5FYefzpULfy37JvMt9DwvXse
images:
http://localhost:8080/ipfs/QmZpc3HvfjEXvLWGQPWbHk3AjD5j8NEN4gmFN8Jmrd5g83/cs
markdown renderer app:
http://localhost:8080/ipfs/QmX7M9CiYXjVeFnkfVGf3y5ixTZ2ACeSGyL1vBJY1HvQPp/mdown

View File

@ -0,0 +1,27 @@
IPFS Alpha Security Notes
We try hard to ensure our system is safe and robust, but all software
has bugs, especially new software. This distribution is meant to be an
alpha preview, don't use it for anything mission critical.
Please note the following:
- This is alpha software and has not been audited. It is our goal
to conduct a proper security audit once we close in on a 1.0 release.
- ipfs is a networked program, and may have serious undiscovered
vulnerabilities. It is written in Go, and we do not execute any
user provided data. But please point any problems out to us in a
github issue, or email security@ipfs.io privately.
- security@ipfs.io GPG key:
- 4B9665FB 92636D17 7C7A86D3 50AAE8A9 59B13AF3
- https://pgp.mit.edu/pks/lookup?op=get&search=0x50AAE8A959B13AF3
- ipfs uses encryption for all communication, but it's NOT PROVEN SECURE
YET! It may be totally broken. For now, the code is included to make
sure we benchmark our operations with encryption in mind. In the future,
there will be an "unsafe" mode for high performance intranet apps.
If this is a blocking feature for you, please contact us.

View File

Binary file not shown.


View File

@ -0,0 +1,3 @@
Index


View File

@ -0,0 +1,36 @@
WIP
# 0.0 - Introduction
Welcome to IPFS! This tour will guide you through a few of the
features of this tool, and the most common commands. Then, it will
immerse you into the world of merkledags and the amazing things
you can do with them.
This tour has many parts, and can be taken in different sequences.
Different people learn different ways, so choose your own adventure:
To start with the concepts, try:
- The Merkle DAG
- Data Structures on the Merkle DAG
- Representing Files with unixfs
- add, cat, ls, refs
...
To start with the examples, try:
- add, cat, ls, refs
- Representing Files with unixfs
- Data Structures on the Merkle DAG
- The Merkle DAG
...
To start with the network, try:
- IPFS Nodes
- Running the daemon
- The Swarm
- The Web

View File

@ -0,0 +1,28 @@
Hello and Welcome to IPFS!
██╗██████╗ ███████╗███████╗
██║██╔══██╗██╔════╝██╔════╝
██║██████╔╝█████╗ ███████╗
██║██╔═══╝ ██╔══╝ ╚════██║
██║██║ ██║ ███████║
╚═╝╚═╝ ╚═╝ ╚══════╝
If you're seeing this, you have successfully installed
IPFS and are now interfacing with the ipfs merkledag!
-------------------------------------------------------
| Warning: |
| This is alpha software. Use at your own discretion! |
| Much is missing or lacking polish. There are bugs. |
| Not yet secure. Read the security notes for more. |
-------------------------------------------------------
Check out some of the other files in this directory:
./about
./help
./quick-start <-- usage examples
./readme <-- this file
./security-notes

View File

Binary file not shown.


View File

@ -0,0 +1 @@
/repo/flatfs/shard/v1/next-to-last/2

View File

@ -0,0 +1,54 @@
IPFS -- Inter-Planetary File system
IPFS is a global, versioned, peer-to-peer filesystem. It combines good ideas
from Git, BitTorrent, Kademlia, SFS, and the Web. It is like a single bit-
torrent swarm, exchanging git objects. IPFS provides an interface as simple
as the HTTP web, but with permanence built in. You can also mount the world
at /ipfs.
IPFS is a protocol:
- defines a content-addressed file system
- coordinates content delivery
- combines Kademlia + BitTorrent + Git
IPFS is a filesystem:
- has directories and files
- mountable filesystem (via FUSE)
IPFS is a web:
- can be used to view documents like the web
- files accessible via HTTP at 'http://ipfs.io/<path>'
- browsers or extensions can learn to use 'ipfs://' directly
- hash-addressed content guarantees authenticity
IPFS is modular:
- connection layer over any network protocol
- routing layer
- uses a routing layer DHT (kademlia/coral)
- uses a path-based naming service
- uses bittorrent-inspired block exchange
IPFS uses crypto:
- cryptographic-hash content addressing
- block-level deduplication
- file integrity + versioning
- filesystem-level encryption + signing support
IPFS is p2p:
- worldwide peer-to-peer file transfers
- completely decentralized architecture
- **no** central point of failure
IPFS is a cdn:
- add a file to the filesystem locally, and it's now available to the world
- caching-friendly (content-hash naming)
- bittorrent-based bandwidth distribution
IPFS has a name service:
- IPNS, an SFS inspired name system
- global namespace based on PKI
- serves to build trust chains
- compatible with other NSes
- can map DNS, .onion, .bit, etc to IPNS

22
test/fixtures/ipfs2/blocks/_README.data vendored Normal file
View File

@ -0,0 +1,22 @@
This is a repository of IPLD objects. Each IPLD object is in a single file,
named <base32 encoding of cid>.data. Where <base32 encoding of cid> is the
"base32" encoding of the CID (as specified in
https://github.com/multiformats/multibase) without the 'B' prefix.
All the object files are placed in a tree of directories, based on a
function of the CID. This is a form of sharding similar to
the objects directory in git repositories. Previously, we used
prefixes, we now use the next-to-last two characters.
func NextToLast(base32cid string) string {
nextToLastLen := 2
offset := len(base32cid) - nextToLastLen - 1
return base32cid[offset : offset+nextToLastLen]
}
For example, an object with a base58 CIDv1 of
zb2rhYSxw4ZjuzgCnWSt19Q94ERaeFhu9uSqRgjSdx9bsgM6f
has a base32 CIDv1 of
BAFKREIA22FLID5AJ2KU7URG47MDLROZIH6YF2KALU2PWEFPVI37YLKRSCA
and will be placed at
SC/AFKREIA22FLID5AJ2KU7URG47MDLROZIH6YF2KALU2PWEFPVI37YLKRSCA.data
with 'SC' being the next-to-last two characters and the 'B' at the
beginning of the CIDv1 string is the multibase prefix that is not
stored in the filename.

82
test/fixtures/ipfs2/config vendored Normal file
View File

@ -0,0 +1,82 @@
{
"Addresses": {
"Swarm": [
"/ip4/0.0.0.0/tcp/0"
],
"Announce": [],
"NoAnnounce": [],
"API": "/ip4/127.0.0.1/tcp/0",
"Gateway": "/ip4/0.0.0.0/tcp/0",
"RPC": "/ip4/127.0.0.1/tcp/5003",
"Delegates": [
"/dns4/node0.delegate.ipfs.io/tcp/443/https",
"/dns4/node1.delegate.ipfs.io/tcp/443/https",
"/dns4/node2.delegate.ipfs.io/tcp/443/https",
"/dns4/node3.delegate.ipfs.io/tcp/443/https"
]
},
"Discovery": {
"MDNS": {
"Enabled": true,
"Interval": 0
},
"webRTCStar": {
"Enabled": false
}
},
"Bootstrap": [],
"Pubsub": {
"Router": "gossipsub",
"Enabled": true
},
"Swarm": {
"ConnMgr": {
"LowWater": 50,
"HighWater": 200
},
"DisableNatPortMap": false
},
"Routing": {
"Type": "dhtclient"
},
"Identity": {
"PeerID": "12D3KooWP5QaQL2omcRK7FLkWSHCmnUF9b17AJf4YG73vLJx2tnQ",
"PrivKey": "CAESQP57AculF+mQdCi+Pw2lC347p9qcNYW35zA9VQTxMftTxQJdZAH/irzFp8r/0kt5LrD6mjGUjciPV0PVY/n7xe0="
},
"Datastore": {
"Spec": {
"type": "mount",
"mounts": [
{
"mountpoint": "/blocks",
"type": "measure",
"prefix": "flatfs.datastore",
"child": {
"type": "flatfs",
"path": "blocks",
"sync": true,
"shardFunc": "/repo/flatfs/shard/v1/next-to-last/2"
}
},
{
"mountpoint": "/",
"type": "measure",
"prefix": "leveldb.datastore",
"child": {
"type": "levelds",
"path": "datastore",
"compression": "none"
}
}
]
}
},
"Keychain": {
"DEK": {
"keyLength": 64,
"iterationCount": 10000,
"salt": "DMg1WBkXaZWg7Ez0RSxvZXl6",
"hash": "sha2-512"
}
}
}

BIN
test/fixtures/ipfs2/datastore/000003.log vendored Normal file

Binary file not shown.

1
test/fixtures/ipfs2/datastore/CURRENT vendored Normal file
View File

@ -0,0 +1 @@
MANIFEST-000002

0
test/fixtures/ipfs2/datastore/LOCK vendored Normal file
View File

1
test/fixtures/ipfs2/datastore/LOG vendored Normal file
View File

@ -0,0 +1 @@
2023/02/01-11:13:27.250433 17118f000 Delete type=3 #1

BIN
test/fixtures/ipfs2/datastore/MANIFEST-000002 vendored Normal file

Binary file not shown.

1
test/fixtures/ipfs2/datastore_spec vendored Normal file
View File

@ -0,0 +1 @@
{"mounts":[{"mountpoint":"/blocks","path":"blocks","shardFunc":"/repo/flatfs/shard/v1/next-to-last/2","type":"flatfs"},{"mountpoint":"/","path":"datastore","type":"levelds"}],"type":"mount"}

BIN
test/fixtures/ipfs2/pins/000003.log vendored Normal file

Binary file not shown.

1
test/fixtures/ipfs2/pins/CURRENT vendored Normal file
View File

@ -0,0 +1 @@
MANIFEST-000002

0
test/fixtures/ipfs2/pins/LOCK vendored Normal file
View File

1
test/fixtures/ipfs2/pins/LOG vendored Normal file
View File

@ -0,0 +1 @@
2023/02/01-11:13:27.250612 171997000 Delete type=3 #1

BIN
test/fixtures/ipfs2/pins/MANIFEST-000002 vendored Normal file

Binary file not shown.

1
test/fixtures/ipfs2/version vendored Normal file
View File

@ -0,0 +1 @@
12

28
test/fixtures/orbit-db-identity-keys.js vendored Normal file
View File

@ -0,0 +1,28 @@
import userA from "./keys/identity-keys/03e0480538c2a39951d054e17ff31fde487cb1031d0044a037b53ad2e028a3e77c.json" assert { type: "json" }
import userB from "./keys/identity-keys/0358df8eb5def772917748fdf8a8b146581ad2041eae48d66cc6865f11783499a6.json" assert { type: "json" }
import userC from "./keys/identity-keys/032f7b6ef0432b572b45fcaf27e7f6757cd4123ff5c5266365bec82129b8c5f214.json" assert { type: "json" }
import userD from "./keys/identity-keys/02a38336e3a47f545a172c9f77674525471ebeda7d6c86140e7a778f67ded92260.json" assert { type: "json" }
import userA_ from "./keys/signing-keys/userA.json" assert { type: "json" }
import userB_ from "./keys/signing-keys/userB.json" assert { type: "json" }
import userC_ from "./keys/signing-keys/userC.json" assert { type: "json" }
import userD_ from "./keys/signing-keys/userD.json" assert { type: "json" }
const identityKeys = {
'03e0480538c2a39951d054e17ff31fde487cb1031d0044a037b53ad2e028a3e77c': userA,
'0358df8eb5def772917748fdf8a8b146581ad2041eae48d66cc6865f11783499a6': userB,
'032f7b6ef0432b572b45fcaf27e7f6757cd4123ff5c5266365bec82129b8c5f214': userC,
'02a38336e3a47f545a172c9f77674525471ebeda7d6c86140e7a778f67ded92260': userD,
}
const signingKeys = {
userA: userA_,
userB: userB_,
userC: userC_,
userD: userD_,
}
export {
identityKeys,
signingKeys
}

322
test/kv.spec.js Normal file
View File

@ -0,0 +1,322 @@
import { deepStrictEqual, strictEqual } from 'assert'
import rimraf from 'rimraf'
import * as Log from '../src/log.js'
import IdentityProvider from 'orbit-db-identity-provider'
import Keystore from '../src/Keystore.js'
import KeyValueStore from '../src/kv.js'
// import KeyValueStorePersisted from '../src/kv-persisted.js'
import Database from '../src/database.js'
// Test utils
import { config, testAPIs, getIpfsPeerId, waitForPeers, startIpfs, stopIpfs, connectPeers } from 'orbit-db-test-utils'
import { identityKeys, signingKeys } from './fixtures/orbit-db-identity-keys.js'
const { sync: rmrf } = rimraf
const { createIdentity } = IdentityProvider
Object.keys(testAPIs).forEach((IPFS) => {
describe('KeyValue Database (' + IPFS + ')', function () {
this.timeout(config.timeout)
let ipfsd1, ipfsd2
let ipfs1, ipfs2
let keystore, signingKeystore
let peerId1, peerId2
let testIdentity1, testIdentity2
let kv1, kv2
const databaseId = 'kv-AAA'
before(async () => {
// Start two IPFS instances
ipfsd1 = await startIpfs(IPFS, config.daemon1)
ipfsd2 = await startIpfs(IPFS, config.daemon2)
ipfs1 = ipfsd1.api
ipfs2 = ipfsd2.api
await connectPeers(ipfs1, ipfs2)
// Get the peer IDs
peerId1 = await getIpfsPeerId(ipfs1)
peerId2 = await getIpfsPeerId(ipfs2)
keystore = new Keystore('./keys_1')
await keystore.open()
for (const [key, value] of Object.entries(identityKeys)) {
await keystore.addKey(key, value)
}
signingKeystore = new Keystore('./keys_2')
await signingKeystore.open()
for (const [key, value] of Object.entries(signingKeys)) {
await signingKeystore.addKey(key, value)
}
// Create an identity for each peer
testIdentity1 = await createIdentity({ id: 'userA', keystore, signingKeystore })
testIdentity2 = await createIdentity({ id: 'userB', keystore, signingKeystore })
})
after(async () => {
if (ipfsd1) {
await stopIpfs(ipfsd1)
}
if (ipfsd2) {
await stopIpfs(ipfsd2)
}
if (keystore) {
await keystore.close()
}
if (signingKeystore) {
await signingKeystore.close()
}
if (testIdentity1) {
rmrf(testIdentity1.id)
}
if (testIdentity2) {
rmrf(testIdentity2.id)
}
rmrf('./orbitdb')
rmrf('./keys_1')
rmrf('./keys_2')
})
afterEach(async () => {
if (kv1) {
await kv1.close()
}
if (kv2) {
await kv2.close()
}
})
describe('using database', () => {
it('returns all entries in the database', async () => {
// let error
let updateCount = 0
let syncCount = 0
const accessController = {
canAppend: (entry) => entry.identity.id === testIdentity1.id
}
const onUpdate = (entry) => {
updateCount++
}
const onSync = (entry) => {
syncCount++
}
const onError = () => {
// error = err
}
kv1 = await KeyValueStore({ KeyValue: KeyValueStore, OpLog: Log, Database, ipfs: ipfs1, identity: testIdentity1, databaseId, accessController })
kv2 = await KeyValueStore({ KeyValue: KeyValueStore, OpLog: Log, Database, ipfs: ipfs2, identity: testIdentity2, databaseId, accessController })
// kv1 = await KeyValueStorePersisted({ KeyValue: KeyValueStore, OpLog: Log, Database, ipfs: ipfs1, identity: testIdentity1, databaseId, accessController })
// kv2 = await KeyValueStorePersisted({ KeyValue: KeyValueStore, OpLog: Log, Database, ipfs: ipfs2, identity: testIdentity2, databaseId, accessController })
kv1.events.on('update', onUpdate)
kv2.events.on('update', onUpdate)
kv1.events.on('sync', onSync)
kv2.events.on('sync', onSync)
kv1.events.on('error', onError)
kv2.events.on('error', onError)
strictEqual(kv1.type, 'kv')
strictEqual(kv2.type, 'kv')
// await waitForPeers(ipfs1, [peerId2], databaseId)
// await waitForPeers(ipfs2, [peerId1], databaseId)
// send a garbage message to pubsub to test onError firing
// await ipfs1.pubsub.publish(databaseId, Uint8Array.from([1, 2, 3, 4, 5]))
await kv1.set('init', true)
await kv1.set('hello', 'friend')
await kv1.del('hello')
await kv1.set('hello', 'friend2')
await kv1.del('hello')
await kv1.set('empty', '')
await kv1.del('empty')
await kv1.set('hello', 'friend3')
// const hash = await kv1.set('hello', 'friend3')
// const lastEntry = await kv1.database.log.get(hash)
const sleep = (time) => new Promise((resolve) => {
setTimeout(() => {
resolve()
}, time)
})
await sleep(1000) // give some time for ipfs peers to sync
// sync() test
// console.time('sync')
// await kv2.sync(lastEntry.bytes)
// console.timeEnd('sync')
// write access test
// let errorMessage
// try {
// await kv2.set('hello', 'friend4')
// } catch (e) {
// errorMessage = e.message
// } finally {
// const valueNotUpdated = await kv2.get('hello')
// strictEqual(valueNotUpdated, 'friend3')
// notStrictEqual(errorMessage, undefined)
// strictEqual(errorMessage.startsWith('Could not append entry:\nKey'), true)
// }
// get() test
console.time('get')
const value0 = await kv2.get('init')
console.timeEnd('get')
console.log(value0)
deepStrictEqual(value0, true)
const value2 = await kv2.get('hello')
console.log(value2)
deepStrictEqual(value2, 'friend3')
const value1 = await kv1.get('hello')
console.log(value1)
deepStrictEqual(value1, 'friend3')
const value9 = await kv1.get('empty')
console.log(value9)
deepStrictEqual(value9, undefined)
// all() test
const all2 = []
console.time('all2')
for await (const keyValue of kv2.iterator()) {
console.log('>', keyValue)
all2.push(keyValue)
}
console.timeEnd('all2')
deepStrictEqual(all2, [
{ key: 'hello', value: 'friend3' },
{ key: 'init', value: true }
])
const all1 = []
console.time('all1')
for await (const keyValue of kv1.iterator()) {
console.log('>', keyValue)
all1.push(keyValue)
}
console.timeEnd('all1')
deepStrictEqual(all1, [
{ key: 'hello', value: 'friend3' },
{ key: 'init', value: true }
])
// onError test
// notStrictEqual(error, undefined)
// strictEqual(error.message, 'CBOR decode error: too many terminals, data makes no sense')
// update event test
strictEqual(updateCount, 8 * 2)
// sync event test
strictEqual(syncCount, 8)
})
})
describe('load database', () => {
it('returns all entries in the database', async () => {
const accessController = {
canAppend: (entry) => entry.identity.id === testIdentity1.id
}
kv1 = await KeyValueStore({ KeyValue: KeyValueStore, OpLog: Log, Database, ipfs: ipfs1, identity: testIdentity1, databaseId, accessController })
kv2 = await KeyValueStore({ KeyValue: KeyValueStore, OpLog: Log, Database, ipfs: ipfs2, identity: testIdentity2, databaseId, accessController })
// kv1 = await KeyValueStorePersisted({ KeyValue: KeyValueStore, OpLog: Log, Database, ipfs: ipfs1, identity: testIdentity1, databaseId, accessController })
// kv2 = await KeyValueStorePersisted({ KeyValue: KeyValueStore, OpLog: Log, Database, ipfs: ipfs2, identity: testIdentity2, databaseId, accessController })
await waitForPeers(ipfs1, [peerId2], databaseId)
await waitForPeers(ipfs2, [peerId1], databaseId)
let syncCount = 0
kv2.events.on('sync', (entry) => {
++syncCount
})
await kv1.set('init', true)
await kv1.set('hello', 'friend')
await kv1.del('hello')
await kv1.set('hello', 'friend2')
await kv1.del('hello')
await kv1.set('empty', '')
await kv1.del('empty')
await kv1.set('hello', 'friend3')
// const hash = await kv1.set('hello', 'friend3')
// const lastEntry = await kv1.log.get(hash)
const sleep = (time) => new Promise((resolve) => {
setTimeout(() => {
resolve()
}, time)
})
await sleep(1000) // give some time for ipfs peers to sync
// sync() test
// console.time('sync')
// await kv2.sync(lastEntry.bytes)
// console.timeEnd('sync')
await kv1.close()
await kv2.close()
kv1 = await KeyValueStore({ KeyValue: KeyValueStore, OpLog: Log, Database, ipfs: ipfs1, identity: testIdentity1, databaseId, accessController })
kv2 = await KeyValueStore({ KeyValue: KeyValueStore, OpLog: Log, Database, ipfs: ipfs2, identity: testIdentity2, databaseId, accessController })
// kv1 = await KeyValueStorePersisted({ KeyValue: KeyValueStore, OpLog: Log, Database, ipfs: ipfs1, identity: testIdentity1, databaseId, accessController })
// kv2 = await KeyValueStorePersisted({ KeyValue: KeyValueStore, OpLog: Log, Database, ipfs: ipfs2, identity: testIdentity2, databaseId, accessController })
console.time('get')
const value0 = await kv2.get('init')
console.timeEnd('get')
console.log(value0)
deepStrictEqual(value0, true)
const value2 = await kv2.get('hello')
console.log(value2)
deepStrictEqual(value2, 'friend3')
const value1 = await kv1.get('hello')
console.log(value1)
deepStrictEqual(value1, 'friend3')
const value9 = await kv1.get('empty')
console.log(value9)
deepStrictEqual(value9, undefined)
const all2 = []
console.time('all2')
for await (const keyValue of kv2.iterator()) {
console.log('>', keyValue)
all2.push(keyValue)
}
console.timeEnd('all2')
deepStrictEqual(all2, [
{ key: 'hello', value: 'friend3' },
{ key: 'init', value: true }
])
const all1 = []
console.time('all1')
for await (const keyValue of kv1.iterator()) {
console.log('>', keyValue)
all1.push(keyValue)
}
console.timeEnd('all1')
deepStrictEqual(all1, [
{ key: 'hello', value: 'friend3' },
{ key: 'init', value: true }
])
strictEqual(syncCount, 8)
})
})
})
})

View File

@ -41,15 +41,17 @@ Object.keys(testAPIs).forEach((IPFS) => {
await signingKeystore.close()
})
describe('append', () => {
describe('append', async () => {
describe('append one', async () => {
let log
let values = []
let heads = []
before(async () => {
log = Log(testIdentity, 'A')
log = await Log(testIdentity, { logId: 'A' })
await log.append('hello1')
values = await log.values()
heads = await log.heads()
})
it('added the correct amount of items', () => {
@ -69,7 +71,7 @@ Object.keys(testAPIs).forEach((IPFS) => {
})
it('has the correct heads', async () => {
log.heads().forEach((head) => {
heads.forEach((head) => {
strictEqual(head.hash, values[0].hash)
})
})
@ -88,16 +90,20 @@ Object.keys(testAPIs).forEach((IPFS) => {
let log
let values = []
let heads = []
before(async () => {
log = Log(testIdentity, 'A')
log = await Log(testIdentity, { logId: 'A' })
for (let i = 0; i < amount; i++) {
await log.append('hello' + i, { pointerCount: nextPointerAmount })
// Make sure the log has the right heads after each append
values = await log.values()
strictEqual(log.heads().length, 1)
deepStrictEqual(log.heads()[0], values[values.length - 1])
}
values = await log.values()
heads = await log.heads()
})
it('set the correct heads', () => {
strictEqual(heads.length, 1)
deepStrictEqual(heads[0], values[values.length - 1])
})
it('added the correct amount of items', () => {

View File

@ -1,17 +1,17 @@
import { strictEqual, deepStrictEqual } from 'assert'
import rimraf from 'rimraf'
import { copy } from 'fs-extra'
import { Log, MemoryStorage, LRUStorage, IPFSBlockStorage } from '../src/log.js'
import { Log } from '../src/log.js'
import IdentityProvider from 'orbit-db-identity-provider'
import Keystore from '../src/Keystore.js'
// Test utils
import { config, testAPIs, startIpfs, stopIpfs } from 'orbit-db-test-utils'
import { config, testAPIs } from 'orbit-db-test-utils'
const { sync: rmrf } = rimraf
const { createIdentity } = IdentityProvider
let ipfsd, ipfs, testIdentity, testIdentity2, testIdentity3
let testIdentity, testIdentity2, testIdentity3
Object.keys(testAPIs).forEach((IPFS) => {
describe('Log - CRDT (' + IPFS + ')', function () {
@ -33,32 +33,28 @@ Object.keys(testAPIs).forEach((IPFS) => {
testIdentity = await createIdentity({ id: 'userA', keystore, signingKeystore })
testIdentity2 = await createIdentity({ id: 'userB', keystore, signingKeystore })
testIdentity3 = await createIdentity({ id: 'userC', keystore, signingKeystore })
ipfsd = await startIpfs(IPFS, config.defaultIpfsConfig)
ipfs = ipfsd.api
})
after(async () => {
await stopIpfs(ipfsd)
await keystore.close()
await signingKeystore.close()
await keystore.close()
rmrf(identityKeysPath)
rmrf(signingKeysPath)
})
describe('is a CRDT', () => {
describe('is a CRDT', async () => {
const logId = 'X'
let log1, log2, log3
beforeEach(async () => {
log1 = Log(testIdentity, { logId })
log2 = Log(testIdentity2, { logId })
log3 = Log(testIdentity3, { logId })
log1 = await Log(testIdentity, { logId })
log2 = await Log(testIdentity2, { logId })
log3 = await Log(testIdentity3, { logId })
})
it('join is associative', async () => {
const expectedElementsCount = 6
const storage = MemoryStorage()
await log1.append('helloA1')
await log1.append('helloA2')
@ -73,9 +69,9 @@ Object.keys(testAPIs).forEach((IPFS) => {
const res1 = await log1.values()
log1 = Log(testIdentity, { logId, storage })
log2 = Log(testIdentity2, { logId, storage })
log3 = Log(testIdentity3, { logId, storage })
log1 = await Log(testIdentity, { logId })
log2 = await Log(testIdentity2, { logId })
log3 = await Log(testIdentity3, { logId })
await log1.append('helloA1')
await log1.append('helloA2')
await log2.append('helloB1')
@ -97,7 +93,6 @@ Object.keys(testAPIs).forEach((IPFS) => {
it('join is commutative', async () => {
const expectedElementsCount = 4
const storage = LRUStorage()
await log1.append('helloA1')
await log1.append('helloA2')
@ -108,8 +103,8 @@ Object.keys(testAPIs).forEach((IPFS) => {
await log2.join(log1)
const res1 = await log2.values()
log1 = Log(testIdentity, { logId, storage })
log2 = Log(testIdentity2, { logId, storage })
log1 = await Log(testIdentity, { logId })
log2 = await Log(testIdentity2, { logId })
await log1.append('helloA1')
await log1.append('helloA2')
await log2.append('helloB1')
@ -126,11 +121,9 @@ Object.keys(testAPIs).forEach((IPFS) => {
})
it('multiple joins are commutative', async () => {
const storage = LRUStorage(MemoryStorage())
// b + a == a + b
log1 = Log(testIdentity, { logId, storage })
log2 = Log(testIdentity2, { logId, storage })
log1 = await Log(testIdentity, { logId })
log2 = await Log(testIdentity2, { logId })
await log1.append('helloA1')
await log1.append('helloA2')
await log2.append('helloB1')
@ -138,8 +131,8 @@ Object.keys(testAPIs).forEach((IPFS) => {
await log2.join(log1)
const resA1 = await log2.values()
log1 = Log(testIdentity, { logId, storage })
log2 = Log(testIdentity2, { logId, storage })
log1 = await Log(testIdentity, { logId })
log2 = await Log(testIdentity2, { logId })
await log1.append('helloA1')
await log1.append('helloA2')
await log2.append('helloB1')
@ -150,8 +143,8 @@ Object.keys(testAPIs).forEach((IPFS) => {
deepStrictEqual(resA1.map(e => e.hash), resA2.map(e => e.hash))
// a + b == b + a
log1 = Log(testIdentity, { logId, storage })
log2 = Log(testIdentity2, { logId, storage })
log1 = await Log(testIdentity, { logId })
log2 = await Log(testIdentity2, { logId })
await log1.append('helloA1')
await log1.append('helloA2')
await log2.append('helloB1')
@ -159,8 +152,8 @@ Object.keys(testAPIs).forEach((IPFS) => {
await log1.join(log2)
const resB1 = await log1.values()
log1 = Log(testIdentity, { logId, storage })
log2 = Log(testIdentity2, { logId, storage })
log1 = await Log(testIdentity, { logId })
log2 = await Log(testIdentity2, { logId })
await log1.append('helloA1')
await log1.append('helloA2')
await log2.append('helloB1')
@ -171,8 +164,8 @@ Object.keys(testAPIs).forEach((IPFS) => {
deepStrictEqual(resB1.map(e => e.hash), resB2.map(e => e.hash))
// a + c == c + a
log1 = Log(testIdentity, { logId, storage })
log3 = Log(testIdentity3, { logId, storage })
log1 = await Log(testIdentity, { logId })
log3 = await Log(testIdentity3, { logId })
await log1.append('helloA1')
await log1.append('helloA2')
await log3.append('helloC1')
@ -180,8 +173,8 @@ Object.keys(testAPIs).forEach((IPFS) => {
await log3.join(log1)
const resC1 = await log3.values()
log1 = Log(testIdentity, { logId, storage })
log3 = Log(testIdentity3, { logId, storage })
log1 = await Log(testIdentity, { logId })
log3 = await Log(testIdentity3, { logId })
await log1.append('helloA1')
await log1.append('helloA2')
await log3.append('helloC1')
@ -192,8 +185,8 @@ Object.keys(testAPIs).forEach((IPFS) => {
deepStrictEqual(resC1.map(e => e.hash), resC2.map(e => e.hash))
// c + b == b + c
log2 = Log(testIdentity2, { logId, storage })
log3 = Log(testIdentity3, { logId, storage })
log2 = await Log(testIdentity2, { logId })
log3 = await Log(testIdentity3, { logId })
await log2.append('helloB1')
await log2.append('helloB2')
@ -202,8 +195,8 @@ Object.keys(testAPIs).forEach((IPFS) => {
await log3.join(log2)
const resD1 = await log3.values()
log2 = Log(testIdentity2, { logId, storage })
log3 = Log(testIdentity3, { logId, storage })
log2 = await Log(testIdentity2, { logId })
log3 = await Log(testIdentity3, { logId })
await log2.append('helloB1')
await log2.append('helloB2')
await log3.append('helloC1')
@ -214,9 +207,9 @@ Object.keys(testAPIs).forEach((IPFS) => {
deepStrictEqual(resD1.map(e => e.hash), resD2.map(e => e.hash))
// a + b + c == c + b + a
log1 = Log(testIdentity, { logId, storage })
log2 = Log(testIdentity2, { logId, storage })
log3 = Log(testIdentity3, { logId, storage })
log1 = await Log(testIdentity, { logId })
log2 = await Log(testIdentity2, { logId })
log3 = await Log(testIdentity3, { logId })
await log1.append('helloA1')
await log1.append('helloA2')
await log2.append('helloB1')
@ -227,9 +220,9 @@ Object.keys(testAPIs).forEach((IPFS) => {
await log1.join(log3)
const logLeft = await log1.values()
log1 = Log(testIdentity, { logId, storage })
log2 = Log(testIdentity2, { logId, storage })
log3 = Log(testIdentity3, { logId, storage })
log1 = await Log(testIdentity, { logId })
log2 = await Log(testIdentity2, { logId })
log3 = await Log(testIdentity3, { logId })
await log1.append('helloA1')
await log1.append('helloA2')
await log2.append('helloB1')
@ -244,10 +237,9 @@ Object.keys(testAPIs).forEach((IPFS) => {
})
it('join is idempotent', async () => {
const storage = IPFSBlockStorage(MemoryStorage(LRUStorage()), { ipfs })
const expectedElementsCount = 3
const logA = Log(testIdentity, { logId, storage })
const logA = await Log(testIdentity, { logId })
await logA.append('helloA1')
await logA.append('helloA2')
await logA.append('helloA3')

View File

@ -46,28 +46,28 @@ Object.keys(testAPIs).forEach((IPFS) => {
})
it('finds one head after one entry', async () => {
const log1 = Log(testIdentity, { logId: 'A' })
const log1 = await Log(testIdentity, { logId: 'A' })
await log1.append('helloA1')
strictEqual(log1.heads().length, 1)
strictEqual((await log1.heads()).length, 1)
})
it('finds one head after two entries', async () => {
const log1 = Log(testIdentity, { logId: 'A' })
const log1 = await Log(testIdentity, { logId: 'A' })
await log1.append('helloA1')
await log1.append('helloA2')
strictEqual(log1.heads().length, 1)
strictEqual((await log1.heads()).length, 1)
})
it('latest entry is the head', async () => {
const log1 = Log(testIdentity, { logId: 'A' })
const log1 = await Log(testIdentity, { logId: 'A' })
await log1.append('helloA1')
const entry = await log1.append('helloA2')
deepStrictEqual(entry.hash, log1.heads()[0].hash)
deepStrictEqual(entry.hash, (await log1.heads())[0].hash)
})
it('finds head after a join and append', async () => {
const log1 = Log(testIdentity, { logId: 'A' })
const log2 = Log(testIdentity, { logId: 'A' })
const log1 = await Log(testIdentity, { logId: 'A' })
const log2 = await Log(testIdentity, { logId: 'A' })
await log1.append('helloA1')
await log1.append('helloA2')
@ -77,13 +77,14 @@ Object.keys(testAPIs).forEach((IPFS) => {
await log2.append('helloB2')
const expectedHead = last(await log2.values())
strictEqual(log2.heads().length, 1)
deepStrictEqual(log2.heads()[0].hash, expectedHead.hash)
const heads = await log2.heads()
strictEqual(heads.length, 1)
deepStrictEqual(heads[0].hash, expectedHead.hash)
})
it('finds two heads after a join', async () => {
const log2 = Log(testIdentity, { logId: 'A' })
const log1 = Log(testIdentity, { logId: 'A' })
const log2 = await Log(testIdentity, { logId: 'A' })
const log1 = await Log(testIdentity, { logId: 'A' })
await log1.append('helloA1')
await log1.append('helloA2')
@ -95,15 +96,15 @@ Object.keys(testAPIs).forEach((IPFS) => {
await log1.join(log2)
const heads = log1.heads()
const heads = await log1.heads()
strictEqual(heads.length, 2)
strictEqual(heads[0].hash, expectedHead2.hash)
strictEqual(heads[1].hash, expectedHead1.hash)
})
it('finds two heads after two joins', async () => {
const log1 = Log(testIdentity, { logId: 'A' })
const log2 = Log(testIdentity, { logId: 'A' })
const log1 = await Log(testIdentity, { logId: 'A' })
const log2 = await Log(testIdentity, { logId: 'A' })
await log1.append('helloA1')
await log1.append('helloA2')
@ -122,16 +123,16 @@ Object.keys(testAPIs).forEach((IPFS) => {
await log1.join(log2)
const heads = log1.heads()
const heads = await log1.heads()
strictEqual(heads.length, 2)
strictEqual(heads[0].hash, expectedHead1.hash)
strictEqual(heads[1].hash, expectedHead2.hash)
})
it('finds two heads after three joins', async () => {
const log1 = Log(testIdentity, { logId: 'A' })
const log2 = Log(testIdentity, { logId: 'A' })
const log3 = Log(testIdentity, { logId: 'A' })
const log1 = await Log(testIdentity, { logId: 'A' })
const log2 = await Log(testIdentity, { logId: 'A' })
const log3 = await Log(testIdentity, { logId: 'A' })
await log1.append('helloA1')
await log1.append('helloA2')
@ -148,16 +149,16 @@ Object.keys(testAPIs).forEach((IPFS) => {
const expectedHead2 = last(await log2.values())
await log1.join(log2)
const heads = log1.heads()
const heads = await log1.heads()
strictEqual(heads.length, 2)
strictEqual(heads[0].hash, expectedHead1.hash)
strictEqual(heads[1].hash, expectedHead2.hash)
})
it('finds three heads after three joins', async () => {
const log1 = Log(testIdentity, { logId: 'A' })
const log2 = Log(testIdentity, { logId: 'A' })
const log3 = Log(testIdentity, { logId: 'A' })
const log1 = await Log(testIdentity, { logId: 'A' })
const log2 = await Log(testIdentity, { logId: 'A' })
const log3 = await Log(testIdentity, { logId: 'A' })
await log1.append('helloA1')
await log1.append('helloA2')
@ -175,7 +176,7 @@ Object.keys(testAPIs).forEach((IPFS) => {
await log1.join(log2)
await log1.join(log3)
const heads = log1.heads()
const heads = await log1.heads()
strictEqual(heads.length, 3)
deepStrictEqual(heads[0].hash, expectedHead1.hash)
deepStrictEqual(heads[1].hash, expectedHead2.hash)

View File

@ -1,7 +1,6 @@
import { strictEqual, deepStrictEqual } from 'assert'
import rimraf from 'rimraf'
import { copy } from 'fs-extra'
import { Log, MemoryStorage } from '../src/log.js'
import { Log } from '../src/log.js'
import IdentityProvider from 'orbit-db-identity-provider'
import Keystore from '../src/Keystore.js'
import LogCreator from './utils/log-creator.js'
@ -9,29 +8,33 @@ import all from 'it-all'
// Test utils
import { config, testAPIs, startIpfs, stopIpfs } from 'orbit-db-test-utils'
import { identityKeys, signingKeys } from './fixtures/orbit-db-identity-keys.js'
const { sync: rmrf } = rimraf
const { createIdentity } = IdentityProvider
const { createLogWithSixteenEntries } = LogCreator
let ipfsd, ipfs, testIdentity, testIdentity2, testIdentity3
Object.keys(testAPIs).forEach((IPFS) => {
describe('Log - Iterator (' + IPFS + ')', function () {
this.timeout(config.timeout)
const { identityKeyFixtures, signingKeyFixtures, identityKeysPath, signingKeysPath } = config
let ipfs
let ipfsd
let keystore, signingKeystore
let testIdentity, testIdentity2, testIdentity3
before(async () => {
rmrf(identityKeysPath)
rmrf(signingKeysPath)
await copy(identityKeyFixtures, identityKeysPath)
await copy(signingKeyFixtures, signingKeysPath)
keystore = new Keystore('./keys_1')
await keystore.open()
for (const [key, value] of Object.entries(identityKeys)) {
await keystore.addKey(key, value)
}
keystore = new Keystore(identityKeysPath)
signingKeystore = new Keystore(signingKeysPath)
signingKeystore = new Keystore('./keys_2')
await signingKeystore.open()
for (const [key, value] of Object.entries(signingKeys)) {
await signingKeystore.addKey(key, value)
}
testIdentity = await createIdentity({ id: 'userA', keystore, signingKeystore })
testIdentity2 = await createIdentity({ id: 'userB', keystore, signingKeystore })
@ -41,23 +44,27 @@ Object.keys(testAPIs).forEach((IPFS) => {
})
after(async () => {
await stopIpfs(ipfsd)
rmrf(identityKeysPath)
rmrf(signingKeysPath)
await keystore.close()
await signingKeystore.close()
if (ipfsd) {
await stopIpfs(ipfsd)
}
if (keystore) {
await keystore.close()
}
if (signingKeystore) {
await signingKeystore.close()
}
rmrf('./keys_1')
rmrf('./keys_2')
})
describe('Basic iterator functionality', () => {
describe('Basic iterator functionality', async () => {
let log1
let startHash
const hashes = []
const logSize = 100
const storage = MemoryStorage()
beforeEach(async () => {
log1 = Log(testIdentity, { logId: 'X', storage })
log1 = await Log(testIdentity, { logId: 'X' })
for (let i = 0; i < logSize; i++) {
const entry = await log1.append('entry' + i)
@ -395,17 +402,18 @@ Object.keys(testAPIs).forEach((IPFS) => {
})
})
describe('Iteration over forked/joined logs', () => {
let fixture, identities
describe('Iteration over forked/joined logs', async () => {
let fixture, identities, heads
before(async () => {
identities = [testIdentity3, testIdentity2, testIdentity3, testIdentity]
fixture = await createLogWithSixteenEntries(Log, ipfs, identities)
heads = await fixture.log.heads()
})
it('returns the full length from all heads', async () => {
const it = fixture.log.iterator({
lte: fixture.log.heads()
lte: heads
})
const result = await all(it)
@ -415,7 +423,7 @@ Object.keys(testAPIs).forEach((IPFS) => {
it('returns partial entries from all heads', async () => {
const it = fixture.log.iterator({
lte: fixture.log.heads(),
lte: heads,
amount: 6
})
@ -427,7 +435,7 @@ Object.keys(testAPIs).forEach((IPFS) => {
it('returns partial logs from single heads #1', async () => {
const it = fixture.log.iterator({
lte: [fixture.log.heads()[0]]
lte: [heads[0]]
})
const result = await all(it)
@ -437,7 +445,7 @@ Object.keys(testAPIs).forEach((IPFS) => {
it('returns partial logs from single heads #2', async () => {
const it = fixture.log.iterator({
lte: [fixture.log.heads()[1]]
lte: [heads[1]]
})
const result = await all(it)

View File

@ -45,8 +45,8 @@ Object.keys(testAPIs).forEach(IPFS => {
let log1, log2
before(async () => {
log1 = Log(testIdentity, { logId: 'A' })
log2 = Log(testIdentity2, { logId: 'A' })
log1 = await Log(testIdentity, { logId: 'A' })
log2 = await Log(testIdentity2, { logId: 'A' })
})
it('joins consistently', async () => {

View File

@ -1,6 +1,5 @@
import { strictEqual, notStrictEqual, deepStrictEqual } from 'assert'
import rimraf from 'rimraf'
import { copy } from 'fs-extra'
import Clock from '../src/lamport-clock.js'
import { Log } from '../src/log.js'
import IdentityProvider from 'orbit-db-identity-provider'
@ -8,33 +7,35 @@ import Keystore from '../src/Keystore.js'
// Test utils
import { config, testAPIs } from 'orbit-db-test-utils'
import { identityKeys, signingKeys } from './fixtures/orbit-db-identity-keys.js'
const { sync: rmrf } = rimraf
const { createIdentity } = IdentityProvider
let testIdentity, testIdentity2, testIdentity3, testIdentity4
const last = (arr) => {
return arr[arr.length - 1]
}
Object.keys(testAPIs).forEach((IPFS) => {
describe('Log - Join (' + IPFS + ')', function () {
describe('Log - Join (' + IPFS + ')', async function () {
this.timeout(config.timeout)
const { identityKeyFixtures, signingKeyFixtures, identityKeysPath, signingKeysPath } = config
let keystore, signingKeystore
let log1, log2, log3, log4
let testIdentity, testIdentity2, testIdentity3, testIdentity4
before(async () => {
rmrf(identityKeysPath)
rmrf(signingKeysPath)
await copy(identityKeyFixtures, identityKeysPath)
await copy(signingKeyFixtures, signingKeysPath)
keystore = new Keystore('./keys_1')
await keystore.open()
for (const [key, value] of Object.entries(identityKeys)) {
await keystore.addKey(key, value)
}
keystore = new Keystore(identityKeysPath)
signingKeystore = new Keystore(signingKeysPath)
signingKeystore = new Keystore('./keys_2')
await signingKeystore.open()
for (const [key, value] of Object.entries(signingKeys)) {
await signingKeystore.addKey(key, value)
}
testIdentity = await createIdentity({ id: 'userC', keystore, signingKeystore })
testIdentity2 = await createIdentity({ id: 'userB', keystore, signingKeystore })
@ -43,18 +44,21 @@ Object.keys(testAPIs).forEach((IPFS) => {
})
after(async () => {
rmrf(identityKeysPath)
rmrf(signingKeysPath)
await keystore.close()
await signingKeystore.close()
if (keystore) {
await keystore.close()
}
if (signingKeystore) {
await signingKeystore.close()
}
rmrf('./keys_1')
rmrf('./keys_2')
})
beforeEach(async () => {
log1 = Log(testIdentity, { logId: 'X' })
log2 = Log(testIdentity2, { logId: 'X' })
log3 = Log(testIdentity3, { logId: 'X' })
log4 = Log(testIdentity4, { logId: 'X' })
log1 = await Log(testIdentity, { logId: 'X' })
log2 = await Log(testIdentity2, { logId: 'X' })
log3 = await Log(testIdentity3, { logId: 'X' })
log4 = await Log(testIdentity4, { logId: 'X' })
})
it('joins logs', async () => {
@ -83,7 +87,7 @@ Object.keys(testAPIs).forEach((IPFS) => {
const valuesC = await log1.values()
strictEqual(valuesC.length, amount * 2)
strictEqual(log1.heads().length, 2)
strictEqual((await log1.heads()).length, 2)
})
it('throws an error if first log is not defined', async () => {
@ -113,9 +117,9 @@ Object.keys(testAPIs).forEach((IPFS) => {
const logIdB = 'BBB'
let err
try {
const logA = Log(testIdentity, { logId: logIdA })
const logA = await Log(testIdentity, { logId: logIdA })
await logA.append('entryA')
const logB = Log(testIdentity, { logId: logIdB })
const logB = await Log(testIdentity, { logId: logIdB })
await logB.append('entryB')
const valuesB = await logB.values()
await logA.joinEntry(last(valuesB))
@ -131,9 +135,9 @@ Object.keys(testAPIs).forEach((IPFS) => {
const logIdB = 'BBB'
let err
try {
const logA = Log(testIdentity, { logId: logIdA })
const logA = await Log(testIdentity, { logId: logIdA })
await logA.append('entryA')
const logB = Log(testIdentity, { logId: logIdB })
const logB = await Log(testIdentity, { logId: logIdB })
await logB.append('entryB')
await logA.join(logB)
} catch (e) {
@ -223,35 +227,42 @@ Object.keys(testAPIs).forEach((IPFS) => {
it('joins 2 logs two ways and has the right heads at every step', async () => {
await log1.append('helloA1')
strictEqual(log1.heads().length, 1)
strictEqual(log1.heads()[0].payload, 'helloA1')
const heads1 = await log1.heads()
strictEqual(heads1.length, 1)
strictEqual(heads1[0].payload, 'helloA1')
await log2.append('helloB1')
strictEqual(log2.heads().length, 1)
strictEqual(log2.heads()[0].payload, 'helloB1')
const heads2 = await log2.heads()
strictEqual(heads2.length, 1)
strictEqual(heads2[0].payload, 'helloB1')
await log2.join(log1)
strictEqual(log2.heads().length, 2)
strictEqual(log2.heads()[0].payload, 'helloB1')
strictEqual(log2.heads()[1].payload, 'helloA1')
const heads3 = await log2.heads()
strictEqual(heads3.length, 2)
strictEqual(heads3[0].payload, 'helloB1')
strictEqual(heads3[1].payload, 'helloA1')
await log1.join(log2)
strictEqual(log1.heads().length, 2)
strictEqual(log1.heads()[0].payload, 'helloB1')
strictEqual(log1.heads()[1].payload, 'helloA1')
const heads4 = await log1.heads()
strictEqual(heads4.length, 2)
strictEqual(heads4[0].payload, 'helloB1')
strictEqual(heads4[1].payload, 'helloA1')
await log1.append('helloA2')
strictEqual(log1.heads().length, 1)
strictEqual(log1.heads()[0].payload, 'helloA2')
const heads5 = await log1.heads()
strictEqual(heads5.length, 1)
strictEqual(heads5[0].payload, 'helloA2')
await log2.append('helloB2')
strictEqual(log2.heads().length, 1)
strictEqual(log2.heads()[0].payload, 'helloB2')
const heads6 = await log2.heads()
strictEqual(heads6.length, 1)
strictEqual(heads6[0].payload, 'helloB2')
await log2.join(log1)
strictEqual(log2.heads().length, 2)
strictEqual(log2.heads()[0].payload, 'helloB2')
strictEqual(log2.heads()[1].payload, 'helloA2')
const heads7 = await log2.heads()
strictEqual(heads7.length, 2)
strictEqual(heads7[0].payload, 'helloB2')
strictEqual(heads7[1].payload, 'helloA2')
})
it('joins 4 logs to one', async () => {
@ -319,15 +330,15 @@ Object.keys(testAPIs).forEach((IPFS) => {
await log1.append('helloA2')
await log2.append('helloB2')
strictEqual(log1.clock().id, testIdentity.publicKey)
strictEqual(log2.clock().id, testIdentity2.publicKey)
strictEqual(log1.clock().time, 2)
strictEqual(log2.clock().time, 2)
strictEqual((await log1.clock()).id, testIdentity.publicKey)
strictEqual((await log2.clock()).id, testIdentity2.publicKey)
strictEqual((await log1.clock()).time, 2)
strictEqual((await log2.clock()).time, 2)
await log3.join(log1)
strictEqual(log3.id, 'X')
strictEqual(log3.clock().id, testIdentity3.publicKey)
strictEqual(log3.clock().time, 2)
strictEqual((await log3.clock()).id, testIdentity3.publicKey)
strictEqual((await log3.clock()).time, 2)
await log3.append('helloC1')
await log3.append('helloC2')
@ -346,11 +357,11 @@ Object.keys(testAPIs).forEach((IPFS) => {
await log4.append('helloD5')
await log1.append('helloA5')
await log4.join(log1)
deepStrictEqual(log4.clock().id, testIdentity4.publicKey)
deepStrictEqual(log4.clock().time, 7)
strictEqual((await log4.clock()).id, testIdentity4.publicKey)
strictEqual((await log4.clock()).time, 7)
await log4.append('helloD6')
deepStrictEqual(log4.clock().time, 8)
strictEqual((await log4.clock()).time, 8)
const expectedData = [
{ payload: 'helloA1', id: 'X', clock: new Clock(testIdentity.publicKey, 1) },
@ -388,13 +399,13 @@ Object.keys(testAPIs).forEach((IPFS) => {
await log1.join(log3)
strictEqual(log1.id, 'X')
strictEqual(log1.clock().id, testIdentity.publicKey)
strictEqual(log1.clock().time, 2)
strictEqual((await log1.clock()).id, testIdentity.publicKey)
strictEqual((await log1.clock()).time, 2)
await log3.join(log1)
strictEqual(log3.id, 'X')
strictEqual(log3.clock().id, testIdentity3.publicKey)
strictEqual(log3.clock().time, 2)
strictEqual((await log3.clock()).id, testIdentity3.publicKey)
strictEqual((await log3.clock()).time, 2)
await log3.append('helloC1')
await log3.append('helloC2')
@ -408,8 +419,8 @@ Object.keys(testAPIs).forEach((IPFS) => {
await log4.append('helloD3')
await log4.append('helloD4')
strictEqual(log4.clock().id, testIdentity4.publicKey)
strictEqual(log4.clock().time, 6)
strictEqual((await log4.clock()).id, testIdentity4.publicKey)
strictEqual((await log4.clock()).time, 6)
const expectedData = [
'helloA1',

View File

@ -74,7 +74,7 @@ Object.keys(testAPIs).forEach((IPFS) => {
await signingKeystore.close()
})
describe('fromJSON', () => {
describe('fromJSON', async () => {
let identities
before(async () => {
@ -258,9 +258,9 @@ Object.keys(testAPIs).forEach((IPFS) => {
})
it('retrieves partial log from an entry hash', async () => {
const log1 = Log(testIdentity, { logId: 'X' })
const log2 = Log(testIdentity2, { logId: 'X' })
const log3 = Log(testIdentity3, { logId: 'X' })
const log1 = await Log(testIdentity, { logId: 'X' })
const log2 = await Log(testIdentity2, { logId: 'X' })
const log3 = await Log(testIdentity3, { logId: 'X' })
const items1 = []
const items2 = []
const items3 = []
@ -305,9 +305,9 @@ Object.keys(testAPIs).forEach((IPFS) => {
})
it('retrieves full log from an entry hash', async () => {
const log1 = Log(testIdentity, { logId: 'X' })
const log2 = Log(testIdentity2, { logId: 'X' })
const log3 = Log(testIdentity3, { logId: 'X' })
const log1 = await Log(testIdentity, { logId: 'X' })
const log2 = await Log(testIdentity2, { logId: 'X' })
const log3 = await Log(testIdentity3, { logId: 'X' })
const items1 = []
const items2 = []
const items3 = []
@ -338,9 +338,9 @@ Object.keys(testAPIs).forEach((IPFS) => {
})
it('retrieves full log from an entry hash 2', async () => {
const log1 = Log(testIdentity, { logId: 'X' })
const log2 = Log(testIdentity2, { logId: 'X' })
const log3 = Log(testIdentity3, { logId: 'X' })
const log1 = await Log(testIdentity, { logId: 'X' })
const log2 = await Log(testIdentity2, { logId: 'X' })
const log3 = await Log(testIdentity3, { logId: 'X' })
const items1 = []
const items2 = []
const items3 = []
@ -371,9 +371,9 @@ Object.keys(testAPIs).forEach((IPFS) => {
})
it('retrieves full log from an entry hash 3', async () => {
const log1 = Log(testIdentity, { logId: 'X' })
const log2 = Log(testIdentity2, { logId: 'X' })
const log3 = Log(testIdentity4, { logId: 'X' })
const log1 = await Log(testIdentity, { logId: 'X' })
const log2 = await Log(testIdentity2, { logId: 'X' })
const log3 = await Log(testIdentity4, { logId: 'X' })
const items1 = []
const items2 = []
const items3 = []
@ -472,7 +472,7 @@ Object.keys(testAPIs).forEach((IPFS) => {
deepStrictEqual(c.values.map(e => e.payload), tmp)
// make sure logX comes after A, B and C
const logX = Log(testIdentity4, { logId: 'X' })
const logX = await Log(testIdentity4, { logId: 'X' })
await logX.append('1')
await logX.append('2')
await logX.append('3')
@ -494,9 +494,9 @@ Object.keys(testAPIs).forEach((IPFS) => {
})
it('retrieves full log of randomly joined log', async () => {
const log1 = Log(testIdentity, { logId: 'X' })
const log2 = Log(testIdentity3, { logId: 'X' })
const log3 = Log(testIdentity4, { logId: 'X' })
const log1 = await Log(testIdentity, { logId: 'X' })
const log2 = await Log(testIdentity3, { logId: 'X' })
const log3 = await Log(testIdentity4, { logId: 'X' })
for (let i = 1; i <= 5; i++) {
await log1.append('entryA' + i)
@ -531,10 +531,10 @@ Object.keys(testAPIs).forEach((IPFS) => {
})
it('retrieves randomly joined log deterministically', async () => {
const logA = Log(testIdentity, { logId: 'X' })
const logB = Log(testIdentity3, { logId: 'X' })
const log3 = Log(testIdentity4, { logId: 'X' })
const log = Log(testIdentity2, { logId: 'X' })
const logA = await Log(testIdentity, { logId: 'X' })
const logB = await Log(testIdentity3, { logId: 'X' })
const log3 = await Log(testIdentity4, { logId: 'X' })
const log = await Log(testIdentity2, { logId: 'X' })
for (let i = 1; i <= 5; i++) {
await logA.append('entryA' + i)
@ -659,10 +659,10 @@ Object.keys(testAPIs).forEach((IPFS) => {
it('retrieves partially joined log deterministically - single next pointer', async () => {
const nextPointerAmount = 1
const logA = Log(testIdentity, { logId: 'X' })
const logB = Log(testIdentity3, { logId: 'X' })
const log3 = Log(testIdentity4, { logId: 'X' })
const log = Log(testIdentity2, { logId: 'X' })
const logA = await Log(testIdentity, { logId: 'X' })
const logB = await Log(testIdentity3, { logId: 'X' })
const log3 = await Log(testIdentity4, { logId: 'X' })
const log = await Log(testIdentity2, { logId: 'X' })
for (let i = 1; i <= 5; i++) {
await logA.append('entryA' + i, nextPointerAmount)
@ -723,10 +723,10 @@ Object.keys(testAPIs).forEach((IPFS) => {
it('retrieves partially joined log deterministically - multiple next pointers', async () => {
const nextPointersAmount = 64
const logA = Log(testIdentity, { logId: 'X' })
const logB = Log(testIdentity3, { logId: 'X' })
const log3 = Log(testIdentity4, { logId: 'X' })
const log = Log(testIdentity2, { logId: 'X' })
const logA = await Log(testIdentity, { logId: 'X' })
const logB = await Log(testIdentity3, { logId: 'X' })
const log3 = await Log(testIdentity4, { logId: 'X' })
const log = await Log(testIdentity2, { logId: 'X' })
for (let i = 1; i <= 5; i++) {
await logA.append('entryA' + i, nextPointersAmount)
@ -804,9 +804,9 @@ Object.keys(testAPIs).forEach((IPFS) => {
beforeEach(async () => {
const ts = new Date().getTime()
log1 = Log(testIdentity, { logId: 'X' })
log2 = Log(testIdentity2, { logId: 'X' })
log3 = Log(testIdentity3, { logId: 'X' })
log1 = await Log(testIdentity, { logId: 'X' })
log2 = await Log(testIdentity2, { logId: 'X' })
log3 = await Log(testIdentity3, { logId: 'X' })
items1 = []
items2 = []
items3 = []

View File

@ -1,7 +1,7 @@
import { strictEqual } from 'assert'
import rimraf from 'rimraf'
import { copy } from 'fs-extra'
import { Log, LRUStorage } from '../src/log.js'
import { Log } from '../src/log.js'
import IdentityProvider from 'orbit-db-identity-provider'
import Keystore from '../src/Keystore.js'
@ -20,7 +20,6 @@ Object.keys(testAPIs).forEach((IPFS) => {
const { identityKeyFixtures, signingKeyFixtures, identityKeysPath, signingKeysPath } = config
let keystore, signingKeystore
const storage = LRUStorage()
before(async () => {
rmrf(identityKeysPath)
@ -42,12 +41,12 @@ Object.keys(testAPIs).forEach((IPFS) => {
await signingKeystore.close()
})
describe('References', () => {
describe('References', async () => {
const amount = 64
it('creates entries with 2 references', async () => {
const maxReferenceDistance = 2
const log1 = Log(testIdentity, { logId: 'A', storage })
const log1 = await Log(testIdentity, { logId: 'A' })
for (let i = 0; i < amount; i++) {
await log1.append(i.toString(), { pointerCount: maxReferenceDistance })
@ -60,7 +59,7 @@ Object.keys(testAPIs).forEach((IPFS) => {
it('creates entries with 4 references', async () => {
const maxReferenceDistance = 2
const log2 = Log(testIdentity, { logId: 'B', storage })
const log2 = await Log(testIdentity, { logId: 'B' })
for (let i = 0; i < amount * 2; i++) {
await log2.append(i.toString(), { pointerCount: Math.pow(maxReferenceDistance, 2) })
@ -73,7 +72,7 @@ Object.keys(testAPIs).forEach((IPFS) => {
it('creates entries with 8 references', async () => {
const maxReferenceDistance = 2
const log3 = Log(testIdentity, { logId: 'C', storage })
const log3 = await Log(testIdentity, { logId: 'C' })
for (let i = 0; i < amount * 3; i++) {
await log3.append(i.toString(), { pointerCount: Math.pow(maxReferenceDistance, 3) })
@ -86,7 +85,7 @@ Object.keys(testAPIs).forEach((IPFS) => {
it('creates entries with 16 references', async () => {
const maxReferenceDistance = 2
const log4 = Log(testIdentity, { logId: 'D', storage })
const log4 = await Log(testIdentity, { logId: 'D' })
for (let i = 0; i < amount * 4; i++) {
await log4.append(i.toString(), { pointerCount: Math.pow(maxReferenceDistance, 4) })
@ -124,7 +123,7 @@ Object.keys(testAPIs).forEach((IPFS) => {
inputs.forEach(input => {
it(`has ${input.refLength} references, max distance ${input.referenceCount}, total of ${input.amount} entries`, async () => {
const test = async (amount, referenceCount, refLength) => {
const log1 = Log(testIdentity, { logId: 'A', storage })
const log1 = await Log(testIdentity, { logId: 'A' })
for (let i = 0; i < amount; i++) {
await log1.append((i + 1).toString(), { pointerCount: referenceCount })
}

View File

@ -41,55 +41,77 @@ Object.keys(testAPIs).forEach((IPFS) => {
await signingKeystore.close()
})
describe('constructor', async () => {
describe('create', async () => {
it('creates an empty log with default params', async () => {
const log = Log(testIdentity)
const log = await Log(testIdentity)
notStrictEqual(log.heads, undefined)
notStrictEqual(log.id, undefined)
notStrictEqual(log.id, undefined)
notStrictEqual(log.clock(), undefined)
notStrictEqual(log.heads(), undefined)
deepStrictEqual(log.heads(), [])
notStrictEqual(await log.heads(), undefined)
deepStrictEqual(await log.heads(), [])
const values = await log.values()
deepStrictEqual(values, [])
})
it('sets an id', () => {
const log = Log(testIdentity, { logId: 'ABC' })
it('sets an id', async () => {
const log = await Log(testIdentity, { logId: 'ABC' })
strictEqual(log.id, 'ABC')
})
it('sets the clock id', () => {
const log = Log(testIdentity, { logId: 'ABC' })
it('sets the clock id', async () => {
const log = await Log(testIdentity, { logId: 'ABC' })
strictEqual(log.id, 'ABC')
strictEqual(log.clock().id, testIdentity.publicKey)
strictEqual((await log.clock()).id, testIdentity.publicKey)
})
it('generates id string if id is not passed as an argument', () => {
const log = Log(testIdentity)
it('generates id string if id is not passed as an argument', async () => {
const log = await Log(testIdentity)
strictEqual(typeof log.id === 'string', true)
})
it('sets heads if given as params', async () => {
it('sets one head if multiple are given as params', async () => {
const one = await create(testIdentity, 'A', 'entryA', null, [])
const two = await create(testIdentity, 'A', 'entryB', null, [one.hash])
const three = await create(testIdentity, 'A', 'entryC', null, [two.hash])
const storage = MemoryStorage()
await storage.add(one.hash, one.bytes)
await storage.add(two.hash, two.bytes)
await storage.add(three.hash, three.bytes)
const log = Log(testIdentity, { logId: 'A', logHeads: [three], storage })
const four = await create(testIdentity, 'A', 'entryD', null, [two.hash])
const storage = await MemoryStorage()
await storage.put(one.hash, one.bytes)
await storage.put(two.hash, two.bytes)
await storage.put(three.hash, three.bytes)
await storage.put(four.hash, four.bytes)
const log = await Log(testIdentity, { logId: 'A', logHeads: [three, three, two, two], storage })
const values = await log.values()
strictEqual(log.heads().length, 1)
strictEqual(log.heads()[0].hash, three.hash)
const heads = await log.heads()
strictEqual(heads.length, 1)
strictEqual(heads[0].hash, three.hash)
strictEqual(values.length, 3)
})
it('throws an error if heads is not an array', () => {
it('sets two heads if two given as params', async () => {
const one = await create(testIdentity, 'A', 'entryA', null, [])
const two = await create(testIdentity, 'A', 'entryB', null, [one.hash])
const three = await create(testIdentity, 'A', 'entryC', null, [two.hash])
const four = await create(testIdentity, 'A', 'entryD', null, [two.hash])
const storage = await MemoryStorage()
await storage.put(one.hash, one.bytes)
await storage.put(two.hash, two.bytes)
await storage.put(three.hash, three.bytes)
await storage.put(four.hash, four.bytes)
const log = await Log(testIdentity, { logId: 'A', logHeads: [three, four, two], storage })
const values = await log.values()
const heads = await log.heads()
strictEqual(heads.length, 2)
strictEqual(heads[1].hash, three.hash)
strictEqual(heads[0].hash, four.hash)
strictEqual(values.length, 4)
})
it('throws an error if heads is not an array', async () => {
let err
try {
Log(testIdentity, { logId: 'A', entries: [], logHeads: {} })
await Log(testIdentity, { logId: 'A', entries: [], logHeads: {} })
} catch (e) {
err = e
}
@ -98,16 +120,16 @@ Object.keys(testAPIs).forEach((IPFS) => {
})
it('creates default public AccessController if not defined', async () => {
const log = Log(testIdentity)
const log = await Log(testIdentity)
const anyoneCanAppend = await log.access.canAppend('any')
notStrictEqual(log.access, undefined)
strictEqual(anyoneCanAppend, true)
})
it('throws an error if identity is not defined', () => {
it('throws an error if identity is not defined', async () => {
let err
try {
Log()
await Log()
} catch (e) {
err = e
}
@ -118,7 +140,7 @@ Object.keys(testAPIs).forEach((IPFS) => {
describe('values', () => {
it('returns all entries in the log', async () => {
const log = Log(testIdentity)
const log = await Log(testIdentity)
let values = await log.values()
strictEqual(values instanceof Array, true)
strictEqual(values.length, 0)

View File

@ -1,171 +0,0 @@
import { deepStrictEqual, strictEqual } from 'assert'
import rimraf from 'rimraf'
import * as Log from '../src/log.js'
import IdentityProvider from 'orbit-db-identity-provider'
import Keystore from '../src/Keystore.js'
import { copy } from 'fs-extra'
import KeyValueStore from '../src/kv.js'
import Database from '../src/database.js'
// Test utils
import { config, testAPIs, startIpfs, stopIpfs, getIpfsPeerId, waitForPeers, connectPeers } from 'orbit-db-test-utils'
const { sync: rmrf } = rimraf
const { createIdentity } = IdentityProvider
Object.keys(testAPIs).forEach((IPFS) => {
describe('New Log (' + IPFS + ')', function () {
this.timeout(config.timeout)
const { identityKeyFixtures, signingKeyFixtures, identityKeysPath, signingKeysPath } = config
let ipfsd1, ipfsd2
let ipfs1, ipfs2
let keystore, signingKeystore
let peerId1, peerId2
let testIdentity1, testIdentity2
let kv1, kv2
const databaseId = 'AAA'
before(async () => {
rmrf(identityKeysPath)
rmrf(signingKeysPath)
await copy(identityKeyFixtures, identityKeysPath)
await copy(signingKeyFixtures, signingKeysPath)
// Start two IPFS instances
ipfsd1 = await startIpfs(IPFS, config.daemon1)
ipfsd2 = await startIpfs(IPFS, config.daemon2)
ipfs1 = ipfsd1.api
ipfs2 = ipfsd2.api
await connectPeers(ipfs1, ipfs2)
// Get the peer IDs
peerId1 = await getIpfsPeerId(ipfs1)
peerId2 = await getIpfsPeerId(ipfs2)
keystore = new Keystore(identityKeysPath)
signingKeystore = new Keystore(signingKeysPath)
// Create an identity for each peer
testIdentity1 = await createIdentity({ id: 'userA', keystore, signingKeystore })
testIdentity2 = await createIdentity({ id: 'userB', keystore, signingKeystore })
})
after(async () => {
if (kv1) {
await kv1.close()
}
if (kv2) {
await kv2.close()
}
if (ipfsd1) {
await stopIpfs(ipfsd1)
}
if (ipfsd2) {
await stopIpfs(ipfsd2)
}
if (keystore) {
await keystore.close()
}
if (signingKeystore) {
await signingKeystore.close()
}
rmrf(identityKeysPath)
rmrf(signingKeysPath)
})
describe('traverse', () => {
it('returns all entries in the log', async () => {
const accessController = {
canAppend: (entry) => entry.identity.id === testIdentity1.id
}
kv1 = await KeyValueStore(Log, Database, ipfs1, testIdentity1, databaseId, accessController)
kv2 = await KeyValueStore(Log, Database, ipfs2, testIdentity2, databaseId, accessController)
await waitForPeers(ipfs1, [peerId2], databaseId)
await waitForPeers(ipfs2, [peerId1], databaseId)
await kv1.set('init', true)
await kv1.set('hello', 'friend')
await kv1.del('hello')
await kv1.set('hello', 'friend2')
await kv1.del('hello')
await kv1.set('empty', '')
await kv1.del('empty')
const hash = await kv1.set('hello', 'friend3')
const lastEntry = await kv1.database.log.get(hash)
// const sleep = (time) => new Promise((resolve) => {
// setTimeout(() => {
// resolve()
// }, time)
// })
// await sleep(100)
console.time('sync')
await kv2.sync(lastEntry.bytes)
console.timeEnd('sync')
// write access test
let errorMessage
try {
await kv2.set('hello', 'friend4')
} catch (e) {
errorMessage = e.message
} finally {
const valueNotUpdated = await kv2.get('hello')
strictEqual(valueNotUpdated, 'friend3')
// strictEqual(errorMessage, 'Could not append entry:\nKey "0358df8eb5def772917748fdf8a8b146581ad2041eae48d66cc6865f11783499a6" is not allowed to write to the log')
strictEqual(errorMessage.startsWith('Could not append entry:\nKey'), true)
}
console.time('get')
const value0 = await kv2.get('init')
console.timeEnd('get')
console.log(value0)
deepStrictEqual(value0, true)
const value2 = await kv2.get('hello')
console.log(value2)
deepStrictEqual(value2, 'friend3')
const value1 = await kv1.get('hello')
console.log(value1)
deepStrictEqual(value1, 'friend3')
const value9 = await kv1.get('empty')
console.log(value9)
deepStrictEqual(value9, undefined)
const all2 = []
console.time('all2')
for await (const keyValue of kv2.all()) {
console.log('>', keyValue)
all2.push(keyValue)
}
console.timeEnd('all2')
deepStrictEqual(all2, [
{ key: 'hello', value: 'friend3' },
{ key: 'init', value: true }
])
const all1 = []
console.time('all1')
for await (const keyValue of kv1.all()) {
console.log('>', keyValue)
all1.push(keyValue)
}
console.timeEnd('all1')
deepStrictEqual(all1, [
{ key: 'hello', value: 'friend3' },
{ key: 'init', value: true }
])
})
})
})
})

View File

@ -1,7 +1,7 @@
import { strictEqual } from 'assert'
import rimraf from 'rimraf'
import { copy } from 'fs-extra'
import { Log, Entry } from '../src/log.js'
import { Log, Entry, IPFSBlockStorage } from '../src/log.js'
import IdentityProvider from 'orbit-db-identity-provider'
import Keystore from '../src/Keystore.js'
@ -20,6 +20,7 @@ Object.keys(testAPIs).forEach((IPFS) => {
const { identityKeyFixtures, signingKeyFixtures, identityKeysPath, signingKeysPath } = config
let keystore, signingKeystore
let storage1, storage2
before(async () => {
rmrf(identityKeysPath)
@ -45,6 +46,9 @@ Object.keys(testAPIs).forEach((IPFS) => {
// Create an identity for each peer
testIdentity = await createIdentity({ id: 'userB', keystore, signingKeystore })
testIdentity2 = await createIdentity({ id: 'userA', keystore, signingKeystore })
storage1 = await IPFSBlockStorage({ ipfs: ipfs1 })
storage2 = await IPFSBlockStorage({ ipfs: ipfs2 })
})
after(async () => {
@ -57,7 +61,7 @@ Object.keys(testAPIs).forEach((IPFS) => {
await signingKeystore.close()
})
describe('replicates logs deterministically', function () {
describe('replicates logs deterministically', async function () {
const amount = 128 + 1
const logId = 'A'
@ -69,6 +73,7 @@ Object.keys(testAPIs).forEach((IPFS) => {
try {
if (!messageIsFromMe(message)) {
const entry = await Entry.decode(message.data)
await storage1.put(entry.hash, entry.bytes)
await log1.joinEntry(entry)
}
} catch (e) {
@ -82,6 +87,7 @@ Object.keys(testAPIs).forEach((IPFS) => {
try {
if (!messageIsFromMe(message)) {
const entry = await Entry.decode(message.data)
await storage2.put(entry.hash, entry.bytes)
await log2.joinEntry(entry)
}
} catch (e) {
@ -90,10 +96,10 @@ Object.keys(testAPIs).forEach((IPFS) => {
}
beforeEach(async () => {
log1 = Log(testIdentity, { logId })
log2 = Log(testIdentity2, { logId })
input1 = Log(testIdentity, { logId })
input2 = Log(testIdentity2, { logId })
log1 = await Log(testIdentity, { logId, storage: storage1 })
log2 = await Log(testIdentity2, { logId, storage: storage2 })
input1 = await Log(testIdentity, { logId, storage: storage1 })
input2 = await Log(testIdentity2, { logId, storage: storage2 })
await ipfs1.pubsub.subscribe(logId, handleMessage1)
await ipfs2.pubsub.subscribe(logId, handleMessage2)
})
@ -134,7 +140,7 @@ Object.keys(testAPIs).forEach((IPFS) => {
await whileProcessingMessages(config.timeout)
const result = Log(testIdentity, { logId })
const result = await Log(testIdentity, { logId, storage: storage1 })
await result.join(log1)
await result.join(log2)

View File

@@ -1,55 +1,60 @@
import { notStrictEqual, strictEqual, deepStrictEqual } from 'assert'
import rimraf from 'rimraf'
import { copy } from 'fs-extra'
import { Log } from '../src/log.js'
import IdentityProvider from 'orbit-db-identity-provider'
import Keystore from '../src/Keystore.js'
// Test utils
import { config, testAPIs } from 'orbit-db-test-utils'
import { identityKeys, signingKeys } from './fixtures/orbit-db-identity-keys.js'
const { sync: rmrf } = rimraf
const { createIdentity } = IdentityProvider
let testIdentity, testIdentity2
Object.keys(testAPIs).forEach((IPFS) => {
describe('Signed Log (' + IPFS + ')', function () {
this.timeout(config.timeout)
const { identityKeyFixtures, signingKeyFixtures, identityKeysPath, signingKeysPath } = config
let keystore, signingKeystore
let testIdentity, testIdentity2
before(async () => {
rmrf(identityKeysPath)
rmrf(signingKeysPath)
await copy(identityKeyFixtures, identityKeysPath)
await copy(signingKeyFixtures, signingKeysPath)
keystore = new Keystore('./keys_1')
await keystore.open()
for (const [key, value] of Object.entries(identityKeys)) {
await keystore.addKey(key, value)
}
keystore = new Keystore(identityKeysPath)
signingKeystore = new Keystore(signingKeysPath)
signingKeystore = new Keystore('./keys_2')
await signingKeystore.open()
for (const [key, value] of Object.entries(signingKeys)) {
await signingKeystore.addKey(key, value)
}
testIdentity = await createIdentity({ id: 'userA', keystore, signingKeystore })
testIdentity2 = await createIdentity({ id: 'userB', keystore, signingKeystore })
})
after(async () => {
rmrf(identityKeysPath)
rmrf(signingKeysPath)
await keystore.close()
await signingKeystore.close()
if (keystore) {
await keystore.close()
}
if (signingKeystore) {
await signingKeystore.close()
}
rmrf('./keys_1')
rmrf('./keys_2')
})
it('creates a signed log', () => {
it('creates a signed log', async () => {
const logId = 'A'
const log = Log(testIdentity, { logId })
const log = await Log(testIdentity, { logId })
notStrictEqual(log.id, null)
strictEqual(log.id, logId)
})
// it('has the correct identity', () => {
// const log = Log(testIdentity, { logId: 'A' })
// const log = await Log(testIdentity, { logId: 'A' })
// notStrictEqual(log.id, null)
// strictEqual(log.id, 'A')
// strictEqual(log.identity.id, '03e0480538c2a39951d054e17ff31fde487cb1031d0044a037b53ad2e028a3e77c')
@@ -58,23 +63,23 @@ Object.keys(testAPIs).forEach((IPFS) => {
// strictEqual(log.identity.signatures.publicKey, '3044022020982b8492be0c184dc29de0a3a3bd86a86ba997756b0bf41ddabd24b47c5acf02203745fda39d7df650a5a478e52bbe879f0cb45c074025a93471414a56077640a4')
// })
it('has the correct public key', () => {
const log = Log(testIdentity, { logId: 'A' })
it('has the correct public key', async () => {
const log = await Log(testIdentity, { logId: 'A' })
strictEqual(log.identity.publicKey, testIdentity.publicKey)
})
it('has the correct pkSignature', () => {
const log = Log(testIdentity, { logId: 'A' })
it('has the correct pkSignature', async () => {
const log = await Log(testIdentity, { logId: 'A' })
strictEqual(log.identity.signatures.id, testIdentity.signatures.id)
})
it('has the correct signature', () => {
const log = Log(testIdentity, { logId: 'A' })
it('has the correct signature', async () => {
const log = await Log(testIdentity, { logId: 'A' })
strictEqual(log.identity.signatures.publicKey, testIdentity.signatures.publicKey)
})
it('entries contain an identity', async () => {
const log = Log(testIdentity, { logId: 'A' })
const log = await Log(testIdentity, { logId: 'A' })
await log.append('one')
const values = await log.values()
notStrictEqual(values[0].sig, null)
@@ -84,48 +89,23 @@ Object.keys(testAPIs).forEach((IPFS) => {
it('doesn\'t sign entries when identity is not defined', async () => {
let err
try {
Log(null)
await Log(null)
} catch (e) {
err = e
}
strictEqual(err.message, 'Identity is required')
})
it('doesn\'t join logs with different IDs ', async () => {
const log1 = Log(testIdentity, { logId: 'A' })
const log2 = Log(testIdentity2, { logId: 'A' })
let err
try {
await log1.append('one')
await log2.append('two')
await log2.append('three')
await log1.join(log2)
} catch (e) {
err = e.toString()
throw e
}
const values = await log1.values()
strictEqual(err, undefined)
strictEqual(log1.id, 'A')
strictEqual(values.length, 3)
strictEqual(values[0].payload, 'two')
strictEqual(values[1].payload, 'one')
strictEqual(values[2].payload, 'three')
})
it('throws an error if log is signed but trying to merge with an entry that doesn\'t have public signing key', async () => {
const log1 = Log(testIdentity, { logId: 'A' })
const log2 = Log(testIdentity2, { logId: 'A' })
const log1 = await Log(testIdentity, { logId: 'A' })
const log2 = await Log(testIdentity2, { logId: 'A' })
let err
try {
await log1.append('one')
await log2.append('two')
delete log2.heads()[0].key
await log1.join(log2)
const entry = await log2.append('two')
delete entry.key
await log1.joinEntry(entry)
} catch (e) {
err = e.toString()
}
@@ -133,15 +113,15 @@ Object.keys(testAPIs).forEach((IPFS) => {
})
it('throws an error if log is signed but trying to merge an entry that doesn\'t have a signature', async () => {
const log1 = Log(testIdentity, { logId: 'A' })
const log2 = Log(testIdentity2, { logId: 'A' })
const log1 = await Log(testIdentity, { logId: 'A' })
const log2 = await Log(testIdentity2, { logId: 'A' })
let err
try {
await log1.append('one')
await log2.append('two')
delete log2.heads()[0].sig
await log1.join(log2)
const entry = await log2.append('two')
delete entry.sig
await log1.joinEntry(entry)
} catch (e) {
err = e.toString()
}
@@ -149,15 +129,15 @@ Object.keys(testAPIs).forEach((IPFS) => {
})
it('throws an error if log is signed but the signature doesn\'t verify', async () => {
const log1 = Log(testIdentity, { logId: 'A' })
const log2 = Log(testIdentity2, { logId: 'A' })
const log1 = await Log(testIdentity, { logId: 'A' })
const log2 = await Log(testIdentity2, { logId: 'A' })
let err
try {
await log1.append('one')
await log2.append('two')
log2.heads()[0].sig = log1.heads()[0].sig
await log1.join(log2)
const entry1 = await log1.append('one')
const entry2 = await log2.append('two')
entry2.sig = entry1.sig
await log1.joinEntry(entry2)
} catch (e) {
err = e.toString()
}
@@ -171,8 +151,8 @@ Object.keys(testAPIs).forEach((IPFS) => {
it('throws an error if entry doesn\'t have append access', async () => {
const denyAccess = { canAppend: () => false }
const log1 = Log(testIdentity, { logId: 'A' })
const log2 = Log(testIdentity2, { logId: 'A', access: denyAccess })
const log1 = await Log(testIdentity, { logId: 'A' })
const log2 = await Log(testIdentity2, { logId: 'A', access: denyAccess })
let err
try {
@@ -190,8 +170,8 @@ Object.keys(testAPIs).forEach((IPFS) => {
const testACL = {
canAppend: (entry) => entry.identity.id !== testIdentity2.id
}
const log1 = Log(testIdentity, { logId: 'A', access: testACL })
const log2 = Log(testIdentity2, { logId: 'A' })
const log1 = await Log(testIdentity, { logId: 'A', access: testACL })
const log2 = await Log(testIdentity2, { logId: 'A' })
let err
try {

142 test/storage.spec.js Normal file
View File

@@ -0,0 +1,142 @@
import * as IPFS from 'ipfs'
import { strictEqual, notStrictEqual } from 'assert'
import rimraf from 'rimraf'
import { Log, IPFSBlockStorage, MemoryStorage, LRUStorage, ComposedStorage } from '../src/log.js'
import IdentityProvider from 'orbit-db-identity-provider'
import Keystore from '../src/Keystore.js'
import { copy } from 'fs-extra'
// Test utils
import { config, testAPIs } from 'orbit-db-test-utils'
const { sync: rmrf } = rimraf
const { createIdentity } = IdentityProvider
Object.keys(testAPIs).forEach((_) => {
describe('Storages (' + _ + ')', function () {
this.timeout(config.timeout)
const { identityKeyFixtures, signingKeyFixtures, identityKeysPath, signingKeysPath } = config
let ipfs1
let keystore, signingKeystore
let testIdentity1
before(async () => {
rmrf(identityKeysPath)
rmrf(signingKeysPath)
await copy(identityKeyFixtures, identityKeysPath)
await copy(signingKeyFixtures, signingKeysPath)
rmrf('./ipfs1')
await copy('./test/fixtures/ipfs1', './ipfs1')
// Start an IPFS instance
ipfs1 = await IPFS.create({ ...config.daemon1, repo: './ipfs1' })
keystore = new Keystore(identityKeysPath)
signingKeystore = new Keystore(signingKeysPath)
// Create an identity for each peer
testIdentity1 = await createIdentity({ id: 'userA', keystore, signingKeystore })
})
after(async () => {
if (ipfs1) {
await ipfs1.stop()
}
if (keystore) {
await keystore.close()
}
if (signingKeystore) {
await signingKeystore.close()
}
rmrf(identityKeysPath)
rmrf(signingKeysPath)
rmrf(testIdentity1.id)
rmrf('./orbitdb')
rmrf('./ipfs1')
})
const runTestWithStorage = async (storage) => {
const amount = 100
const log1 = await Log(testIdentity1, { logId: 'A', storage })
const log2 = await Log(testIdentity1, { logId: 'A', storage })
for (let i = 0; i < amount; i++) {
await log1.append('hello' + i)
await log2.append('hello' + i)
}
// await log2.join(log1)
const values = await log1.values()
const heads = await log1.heads()
strictEqual(heads.length, 1)
strictEqual(values.length, amount)
await log1.storage.clear()
await log2.storage.clear()
// const values2 = await log2.values()
// const heads2 = await log2.heads()
// strictEqual(heads2.length, 0)
// strictEqual(values2.length, 0)
await log1.storage.close()
await log2.storage.close()
}
describe('LRUStorage', () => {
it('tests the storage', async () => {
const storage = await LRUStorage()
notStrictEqual(storage, undefined)
await runTestWithStorage(storage)
})
})
describe('MemoryStorage', () => {
it('tests the storage', async () => {
const storage = await MemoryStorage()
notStrictEqual(storage, undefined)
await runTestWithStorage(storage)
})
})
describe('IPFSBlockStorage', () => {
it('tests the storage', async () => {
const storage = await IPFSBlockStorage({ ipfs: ipfs1 })
notStrictEqual(storage, undefined)
await runTestWithStorage(storage)
})
})
describe('Composed Storages', () => {
it('tests Memory + IPFSBlockStorage composition', async () => {
const storage1 = await MemoryStorage()
const storage2 = await IPFSBlockStorage({ ipfs: ipfs1 })
const storage = await ComposedStorage(storage1, storage2)
notStrictEqual(storage, undefined)
await runTestWithStorage(storage)
})
it('tests LRU + IPFSBlockStorage composition', async () => {
const storage1 = await LRUStorage({ size: -1 })
const storage2 = await IPFSBlockStorage({ ipfs: ipfs1 })
const storage = await ComposedStorage(storage1, storage2)
notStrictEqual(storage, undefined)
await runTestWithStorage(storage)
})
it('tests Memory + LRU composition', async () => {
const storage1 = await MemoryStorage()
const storage2 = await LRUStorage({ size: -1 })
const storage = await ComposedStorage(storage1, storage2)
notStrictEqual(storage, undefined)
await runTestWithStorage(storage)
})
it('tests LRU + Memory composition', async () => {
const storage1 = await LRUStorage({ size: -1 })
const storage2 = await MemoryStorage()
const storage = await ComposedStorage(storage1, storage2)
notStrictEqual(storage, undefined)
await runTestWithStorage(storage)
})
})
})
})
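For reference, the composition cases above all reduce to the same construction. A minimal usage sketch; the size value mirrors the tests (which pass -1, presumably to disable eviction), and the ordering comment is an assumption that a composed storage tries its members in order:

const local = await LRUStorage({ size: -1 })            // in-memory cache layer
const remote = await IPFSBlockStorage({ ipfs: ipfs1 })  // persistent IPFS-backed layer
const storage = await ComposedStorage(local, remote)    // reads presumably try local, then remote
const log = await Log(testIdentity1, { logId: 'A', storage })
await log.append('hello')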

View File

@@ -1,10 +1,10 @@
class LogCreator {
static async createLogWithSixteenEntries (Log, ipfs, identities) {
const create = async () => {
const logA = Log(identities[0], { logId: 'X' })
const logB = Log(identities[1], { logId: 'X' })
const log3 = Log(identities[2], { logId: 'X' })
const log = Log(identities[3], { logId: 'X' })
const logA = await Log(identities[0], { logId: 'X' })
const logB = await Log(identities[1], { logId: 'X' })
const log3 = await Log(identities[2], { logId: 'X' })
const log = await Log(identities[3], { logId: 'X' })
for (let i = 1; i <= 5; i++) {
await logA.append('entryA' + i)
@@ -41,8 +41,8 @@ class LogCreator {
const expectedData = []
const create = async () => {
const logA = Log(identities[0], { logId: 'X' })
const logB = Log(identities[1], { logId: 'X' })
const logA = await Log(identities[0], { logId: 'X' })
const logB = await Log(identities[1], { logId: 'X' })
for (let i = 1; i <= amount; i++) {
await logA.append('entryA' + i)
await logB.join(logA)
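
Across every file in this commit the mechanical change is the same: Log and the storage constructors became async factories, so each construction site gains an await. A minimal before/after, using names from the hunks above:

// before
const logA = Log(identities[0], { logId: 'X' })
// after
const logA = await Log(identities[0], { logId: 'X' })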