Add jsdocs

This commit is contained in:
haad
2023-04-13 09:15:27 +03:00
parent ec06a8e2d0
commit c117ef63ba
27 changed files with 649 additions and 61 deletions

View File

@@ -1,3 +1,4 @@
/** @module AccessControllers */
import IPFSAccessController from './ipfs.js'
import OrbitDBAccessController from './orbitdb.js'

View File

@@ -1,3 +1,7 @@
/**
* @namespace AccessControllers-IPFS
* @memberof module:AccessControllers
*/
import { IPFSBlockStorage, LRUStorage, ComposedStorage } from '../storage/index.js'
import * as Block from 'multiformats/block'
import * as dagCbor from '@ipld/dag-cbor'

View File

@@ -1,3 +1,7 @@
/**
* @namespace AccessControllers-OrbitDB
* @memberof module:AccessControllers
*/
import ensureACAddress from '../utils/ensure-ac-address.js'
import IPFSAccessController from './ipfs.js'

View File

@@ -1,3 +1,4 @@
/** @namespace Address */
import { CID } from 'multiformats/cid'
import { base58btc } from 'multiformats/bases/base58'
import { posixJoin } from './utils/path-join.js'

View File

@@ -1,3 +1,4 @@
/** @module Database */
import { EventEmitter } from 'events'
import PQueue from 'p-queue'
import Sync from './sync.js'
@@ -9,6 +10,11 @@ const defaultReferencesCount = 16
const defaultCacheSize = 1000
const Database = async ({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate }) => {
/**
* @namespace module:Database~Database
* @description The instance returned by {@link module:Database}.
*/
directory = pathJoin(directory || './orbitdb', `./${address}/`)
meta = meta || {}
referencesCount = referencesCount || defaultReferencesCount

View File

@@ -1,3 +1,8 @@
/**
* @namespace Database-Documents
* @memberof module:Database
* @description Documents Database
*/
import Database from '../database.js'
const DefaultOptions = { indexBy: '_id' }

View File

@@ -1,3 +1,8 @@
/**
* @namespace Database-Events
* @memberof module:Database
* @description Events Database
*/
import Database from '../database.js'
const Events = () => async ({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate }) => {

View File

@@ -1,3 +1,8 @@
/**
* @namespace Database-KeyValueIndexed
* @memberof module:Database
* @description KeyValueIndexed Database
*/
import { KeyValue } from './index.js'
import LevelStorage from '../storage/level.js'
import pathJoin from '../utils/path-join.js'

View File

@@ -1,3 +1,8 @@
/**
* @namespace Database-KeyValue
* @memberof module:Database
* @description KeyValue Database
*/
import Database from '../database.js'
const KeyValue = () => async ({ ipfs, identity, address, name, access, directory, meta, headsStorage, entryStorage, indexStorage, referencesCount, syncAutomatically, onUpdate }) => {

View File

@@ -1,3 +1,4 @@
/** @module Identities */
import Identity, { isIdentity, isEqual, decodeIdentity } from './identity.js'
import { PublicKeyIdentityProvider } from './providers/index.js'
// import DIDIdentityProvider from './identity-providers/did.js'

View File

@@ -1,3 +1,4 @@
/** @module IdentityProviders */
export {
default as Identities,
addIdentityProvider,

View File

@@ -1,3 +1,7 @@
/**
* @namespace module:IdentityProviders.IdentityProviders-PublicKey
* @description PublicKey Identity Provider
*/
import { toString as uint8ArrayToString } from 'uint8arrays/to-string'
import IdentityProvider from './interface.js'
import { signMessage, verifyMessage } from '../../key-store.js'

View File

@@ -1,3 +1,4 @@
/** @namespace Manifest */
import * as Block from 'multiformats/block'
import * as dagCbor from '@ipld/dag-cbor'
import { sha256 } from 'multiformats/hashes/sha2'

View File

@@ -1,14 +1,14 @@
/**
* @namespace module:Log~Entry
* @memberof module:Log
* @description Log Entry
*/
import Clock from './clock.js'
import * as Block from 'multiformats/block'
import * as dagCbor from '@ipld/dag-cbor'
import { sha256 } from 'multiformats/hashes/sha2'
import { base58btc } from 'multiformats/bases/base58'
/*
* @description
* A Log entry
*/
const codec = dagCbor
const hasher = sha256
const hashStringEncoding = base58btc

View File

@@ -1,3 +1,12 @@
/**
* @module Log
* @description
* Log is a verifiable, append-only log CRDT.
*
* Implemented as a Merkle-CRDT as per the paper:
* "Merkle-CRDTs: Merkle-DAGs meet CRDTs"
* https://arxiv.org/abs/2004.00107
*/
import LRU from 'lru'
import Entry from './entry.js'
import Clock from './clock.js'
@@ -25,17 +34,10 @@ const DefaultAccessController = async () => {
}
}
/**
* @description
* Log is a verifiable, append-only log CRDT.
*
* Implemented as a Merkle-CRDT as per the paper:
* "Merkle-CRDTs: Merkle-DAGs meet CRDTs"
* https://arxiv.org/abs/2004.00107
*/
/**
* Create a new Log instance
* @function
* @param {IPFS} ipfs An IPFS instance
* @param {Object} identity Identity (https://github.com/orbitdb/orbit-db-identity-provider/blob/master/src/identity.js)
* @param {Object} options
@@ -45,9 +47,16 @@ const DefaultAccessController = async () => {
* @param {Array<Entry>} options.heads Set the heads of the log
* @param {Clock} options.clock Set the clock of the log
* @param {Function} options.sortFn The sort function - by default LastWriteWins
* @return {Log} The log instance
* @return {module:Log~Log} sync An instance of Log
* @memberof module:Log
* @instance
*/
const Log = async (identity, { logId, logHeads, access, entryStorage, headsStorage, indexStorage, sortFn } = {}) => {
/**
* @namespace Log
* @description The instance returned by {@link module:Log}.
*/
if (identity == null) {
throw new Error('Identity is required')
}
@@ -72,6 +81,8 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
/**
* Returns the clock of the log.
* @returns {Clock}
* @memberof module:Log~Log
* @instance
*/
const clock = async () => {
// Find the latest clock from the heads
@@ -80,8 +91,11 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
}
/**
* Returns an array of entries
* @returns {Array<Entry>}
* Returns the current heads of the log
*
* @returns {Array<module:Log~Entry>}
* @memberof module:Log~Log
* @instance
*/
const heads = async () => {
const res = await _heads.all()
@@ -89,8 +103,11 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
}
/**
* Returns the values in the log.
* @returns {Promise<Array<Entry>>}
* Returns all entries in the log
*
* @returns {Array<module:Log~Entry>}
* @memberof module:Log~Log
* @instance
*/
const values = async () => {
const values = []
@@ -101,9 +118,12 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
}
/**
* Retrieve an entry.
* @param {string} [hash] The hash of the entry to retrieve
* @returns {Promise<Entry|undefined>}
* Retrieve an entry
*
* @param {string} hash The hash of the entry to retrieve
* @returns {module:Log~Entry}
* @memberof module:Log~Log
* @instance
*/
const get = async (hash) => {
const bytes = await _entries.get(hash)
@@ -120,9 +140,14 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
}
/**
* Append a new entry to the log.
* Append a new entry to the log
*
* @param {data} data Payload to add to the entry
* @return {Promise<Entry>} Entry that was appended
* @param {Object} options
* @param {Integer} options.referencesCount TODO
* @return {module:Log~Entry} Entry that was appended
* @memberof module:Log~Log
* @instance
*/
const append = async (data, options = { referencesCount: 0 }) => {
// 1. Prepare entry
@@ -169,10 +194,14 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
*
* Joins another log into this one.
*
* @param {Log} log Log to join with this Log
* @returns {Promise<Log>} This Log instance
* @param {module:Log~Log} log Log to join with this Log
*
* @example
*
* await log1.join(log2)
*
* @memberof module:Log~Log
* @instance
*/
const join = async (log) => {
if (!log) {
@@ -193,10 +222,14 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
/**
* Join an entry into a log.
*
* @param {Entry} entry Entry to join with this Log
* @returns {Promise<Log>} This Log instance
* @param {module:Log~Entry} entry Entry to join with this Log
*
* @example
*
* await log.join(entry)
*
* @memberof module:Log~Log
* @instance
*/
const joinEntry = async (entry) => {
const { hash } = entry
@@ -246,6 +279,8 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
/**
* TODO
* @memberof module:Log~Log
* @instance
*/
const traverse = async function * (rootEntries, shouldStopFn) {
// By default, we don't stop traversal and traverse
@@ -309,15 +344,15 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
}
}
/*
/**
* Async iterator over the log entries
*
* @param {Object} options
* @param {amount} options.amount Number of entries to return
* @param {string|Array} options.gt Beginning hash of the iterator, non-inclusive
* @param {string|Array} options.gte Beginning hash of the iterator, inclusive
* @param {string|Array} options.lt Ending hash of the iterator, non-inclusive
* @param {string|Array} options.lte Ending hash of the iterator, inclusive
* @param {number} options.amount Number of entries to return. Default: return all entries.
* @param {string} options.gt Beginning hash of the iterator, non-inclusive
* @param {string} options.gte Beginning hash of the iterator, inclusive
* @param {string} options.lt Ending hash of the iterator, non-inclusive
* @param {string} options.lte Ending hash of the iterator, inclusive
* @returns {Symbol.asyncIterator} Iterator object of log entries
*
* @example
@@ -339,7 +374,8 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
* }
* })()
*
*
* @memberof module:Log~Log
* @instance
*/
const iterator = async function * ({ amount = -1, gt, gte, lt, lte } = {}) {
// TODO: write comments on how the iterator algorithm works
@@ -412,12 +448,22 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
}
}
/**
* Clear all entries from the log and the underlying storages
* @memberof module:Log~Log
* @instance
*/
const clear = async () => {
await _index.clear()
await _heads.clear()
await _entries.clear()
}
/**
* Close the log and underlying storages
* @memberof module:Log~Log
* @instance
*/
const close = async () => {
await _index.close()
await _heads.close()
@@ -428,6 +474,8 @@ const Log = async (identity, { logId, logHeads, access, entryStorage, headsStora
* Check if an object is a Log.
* @param {Log} obj
* @returns {boolean}
* @memberof module:Log~Log
* @instance
*/
const isLog = (obj) => {
return obj && obj.id !== undefined &&

View File

@@ -1,3 +1,4 @@
/** @module OrbitDB */
import { Events, KeyValue, Documents } from './db/index.js'
import KeyStore from './key-store.js'
import { Identities } from './identities/index.js'

View File

@@ -1,3 +1,8 @@
/**
* @namespace Storage-Composed
* @memberof module:Storage
*/
// Compose storages:
// const storage1 = await ComposedStorage(await LRUStorage(), await LevelStorage())
// const storage2 = await ComposedStorage(storage1, await IPFSBlockStorage())

View File

@@ -1,3 +1,4 @@
/** @module Storage */
export { default as ComposedStorage } from './composed.js'
export { default as IPFSBlockStorage } from './ipfs-block.js'
export { default as LevelStorage } from './level.js'

View File

@@ -1,3 +1,7 @@
/**
* @namespace Storage-IPFS
* @memberof module:Storage
*/
import { CID } from 'multiformats/cid'
import { base58btc } from 'multiformats/bases/base58'

View File

@@ -1,3 +1,7 @@
/**
* @namespace Storage-Level
* @memberof module:Storage
*/
import { Level } from 'level'
const defaultValueEncoding = 'view'

View File

@@ -1,3 +1,7 @@
/**
* @namespace Storage-LRU
* @memberof module:Storage
*/
import LRU from 'lru'
const defaultSize = 1000000

View File

@@ -1,3 +1,7 @@
/**
* @namespace Storage-Memory
* @memberof module:Storage
*/
const MemoryStorage = async () => {
let memory = {}

View File

@@ -7,41 +7,105 @@ import pathJoin from './utils/path-join.js'
const DefaultTimeout = 30000 // 30 seconds
/**
* @module Sync
* @description
* Syncs an append-only, conflict-free replicated data type (CRDT) log between
* multiple peers.
* The Sync Protocol for OrbitDB synchronizes the database operations {@link module:Log} between multiple peers.
*
* The sync protocol synchronizes heads between multiple peers, both during
* startup and also when new entries are appended to the log.
* The Sync Protocol sends and receives heads between multiple peers,
* both when opening a database and when a database is updated, ie.
* new entries are appended to the log.
*
* When Sync is started, peers "dial" each other using libp2p's custom protocol
* handler and initiate the exchange of heads each peer currently has. Once
* initial sync has completed, peers notify one another of updates to heads
* using pubsub "subscribe" with the same log.id topic. A peer with new heads
* can broadcast changes to other peers using pubsub "publish". Peers
* subscribed to the same topic will then be notified and will update their
* heads accordingly.
* When Sync is started, a peer subscribes to a pubsub topic of the log's id.
* Upon subscribing to the topic, peers already connected to the topic receive
* the subscription message and "dial" the subscribing peer using a libp2p
* custom protocol. Once connected to the subscribing peer on a direct peer-to-peer
* connection, the dialing peer and the subscribing peer exchange the heads of the Log
* each peer currently has. Once completed, the peers have the same "local state".
*
* The sync protocol only guarantees that the message is published; it does not
* guarantee the order in which messages are received or even that the message
* is received at all. The sync protocol only guarantees that heads will
* eventually reach consistency between all peers with the same address.
* Once the initial sync has completed, peers notify one another of updates to the
* log, ie. updates to the database, using the initially opened pubsub topic subscription.
* A peer with new heads broadcasts changes to other peers by publishing the updated heads
* to the pubsub topic. Peers subscribed to the same topic will then receive the update and
* will update their log's state, the heads, accordingly.
*
* The Sync Protocol is eventually consistent. It guarantees that once all messages
* have been sent and received, peers will observe the same log state and values.
* The Sync Protocol does not guarantee the order in which messages are received or
* even that a message is received at all, nor any timing on when messages are received.
*
* Note that the Sync Protocol does not retrieve the full log when synchronizing the
* heads. Rather only the "latest entries" in the log, the heads, are exchanged. In order
* to retrieve the full log and each entry, the user would call the log.traverse() or
* log.iterator() functions, which go through the log and retrieve each missing
* log entry from IPFS.
*
* @example
* // Using defaults
* const sync = await Sync({ ipfs, log, onSynced: (peerId, heads) => ... })
*
* @example
* // Using all parameters
* const sync = await Sync({ ipfs, log, events, onSynced: (peerId, heads) => ..., start: false })
* sync.events.on('join', (peerId, heads) => ...)
* sync.events.on('leave', (peerId) => ...)
* sync.events.on('error', (err) => ...)
* await sync.start()
*/
/**
* Creates a Sync instance for synchronizing logs between multiple peers.
*
* @function
* @param {Object} params One or more parameters for configuring Sync.
* @param {IPFS} params.ipfs An IPFS instance. Used for synchronizing peers.
* @param {Log} params.log The Log instance to sync.
* @param {Object} params.events An event emitter. Defaults to an instance of
* EventEmitter. Events emitted are 'join', 'error' and 'leave'.
* @param {Function} params.onSynced A function that is called after the peer
* @param {IPFS} params.ipfs An IPFS instance.
* @param {Log} params.log The log instance to sync.
* @param {EventEmitter} [params.events] An event emitter to use. Events emitted are 'join', 'leave' and 'error'. If the parameter is not provided, an EventEmitter will be created.
* @param {onSynced} [params.onSynced] A callback function that is called after the peer
* has received heads from another peer.
* @param {Boolean} params.start True if sync should start automatically, false
* @param {Boolean} [params.start] True if sync should start automatically, false
* otherwise. Defaults to true.
* @return {Sync} The Sync protocol instance.
* @return {module:Sync~Sync} sync An instance of the Sync Protocol.
* @memberof module:Sync
* @instance
*/
const Sync = async ({ ipfs, log, events, onSynced, start, timeout }) => {
/**
* @namespace module:Sync~Sync
* @description The instance returned by {@link module:Sync}.
*/
/**
* Callback function when new heads have been received from other peers.
* @callback module:Sync~onSynced
* @param {PeerID} peerId PeerID of the peer we received heads from
* @param {Entry[]} heads An array of Log entries
*/
/**
* Event fired when new heads have been received from other peers.
* @event module:Sync~Sync#join
* @param {PeerID} peerId PeerID of the peer we received heads from
* @param {Entry[]} heads An array of Log entries
* @example
* sync.events.on('join', (peerID, heads) => ...)
*/
/**
* Event fired when a peer leaves the sync protocol.
* @event module:Sync~Sync#leave
* @param {PeerID} peerId PeerID of the peer that left
* @example
* sync.events.on('leave', (peerID) => ...)
*/
/**
* Event fired when an error occurs.
* @event module:Sync~Sync#error
* @param {Error} error The error that occurred
* @example
* sync.events.on('error', (error) => ...)
*/
if (!ipfs) throw new Error('An instance of ipfs is required.')
if (!log) throw new Error('An instance of log is required.')
@@ -49,9 +113,29 @@ const Sync = async ({ ipfs, log, events, onSynced, start, timeout }) => {
const headsSyncAddress = pathJoin('/orbitdb/heads/', address)
const queue = new PQueue({ concurrency: 1 })
/**
* Set of currently connected peers for the log for this Sync instance.
* @name peers
* @type {Set}
* @return Set set of PeerIDs
* @memberof module:Sync~Sync
* @instance
*/
const peers = new Set()
/**
* Event emitter that emits updates.
* @name events
* @type {EventEmitter}
* @fires join when a peer has connected and heads were exchanged
* @fires leave when a peer disconnects
* @fires error when an error occurs
* @memberof module:Sync~Sync
* @instance
*/
events = events || new EventEmitter()
timeout = timeout || DefaultTimeout
let started = false
@@ -145,12 +229,25 @@ const Sync = async ({ ipfs, log, events, onSynced, start, timeout }) => {
queue.add(task)
}
/**
* Add a log entry to the Sync Protocol to be sent to peers.
* @function add
* @param {Entry} entry Log entry
* @memberof module:Sync~Sync
* @instance
*/
const add = async (entry) => {
if (started) {
await ipfs.pubsub.publish(address, entry.bytes)
}
}
/**
* Stop the Sync Protocol.
* @function stop
* @memberof module:Sync~Sync
* @instance
*/
const stopSync = async () => {
if (started) {
await queue.onIdle()
@@ -162,6 +259,12 @@ const Sync = async ({ ipfs, log, events, onSynced, start, timeout }) => {
}
}
/**
* Start the Sync Protocol.
* @function start
* @memberof module:Sync~Sync
* @instance
*/
const startSync = async () => {
if (!started) {
// Exchange head entries with peers when connected