unbuild - PUBLISHED! 1238

This commit is contained in:
Mark Nadal 2022-08-09 15:33:22 -07:00
parent d06359f45c
commit 07b30ed602
8 changed files with 80 additions and 37 deletions

2
gun.min.js vendored

File diff suppressed because one or more lines are too long

9
sea.js
View File

@ -1353,9 +1353,16 @@
check.any(eve, msg, val, key, soul, at, no, at.user||''); return;
eve.to.next(msg); // not handled
}
check.hash = function(eve, msg, val, key, soul, at, no){
check.hash = function(eve, msg, val, key, soul, at, no){ // mark unbuilt @i001962 's epic hex contrib!
	SEA.work(val, null, function(data){
		// Re-encode a hex digest as base64 so a hex-addressed soul can match the base64 hash.
		// Walks the odd indices so each pass consumes one full hex byte pair, same as the
		// original parity filter; a trailing unpaired nibble (odd-length input) is dropped.
		function hexToBase64(hexStr) {
			let bin = "";
			for(let i = 1; i < hexStr.length; i += 2){
				bin += String.fromCharCode(parseInt(hexStr.substring(i - 1, i + 1), 16));
			}
			return btoa(bin);
		}
		const want = key.split('#').slice(-1)[0]; // hash portion of the `soul#hash` key.
		if(data && data === want){ return eve.to.next(msg) } // base64 match.
		if(data && data === hexToBase64(want)){ return eve.to.next(msg) } // hex-encoded match.
		no("Data hash not same as hash!");
	}, {name: 'SHA-256'});
}

View File

@ -36,10 +36,11 @@ Gun.on('create', function lg(root){
root.on('put', function(msg){
this.to.next(msg); // remember to call next middleware adapter
var put = msg.put, soul = put['#'], key = put['.'], id = msg['#'], tmp; // pull data off wire envelope
var put = msg.put, soul = put['#'], key = put['.'], id = msg['#'], ok = msg.ok||'', tmp; // pull data off wire envelope
disk[soul] = Gun.state.ify(disk[soul], key, put['>'], put[':'], soul); // merge into disk object
if(stop && size > (4999880)){ root.on('in', {'@': id, err: "localStorage max!"}); return; }
if(!msg['@']){ acks.push(id) } // then ack any non-ack write. // TODO: use batch id.
//if(!msg['@']){ acks.push(id) } // then ack any non-ack write. // TODO: use batch id.
if(!msg['@'] && (!msg._.via || Math.random() < (ok['@'] / ok['/']))){ acks.push(id) } // then ack any non-ack write. // TODO: use batch id.
if(to){ return }
to = setTimeout(flush, 9+(size / 333)); // 0.1MB = 0.3s, 5MB = 15s
});
@ -55,7 +56,7 @@ Gun.on('create', function lg(root){
}
size = tmp.length;
if(!err && !Object.empty(opt.peers)){ return } // only ack if there are no peers. // Switch this to probabilistic mode
//if(!err && !Object.empty(opt.peers)){ return } // only ack if there are no peers. // Switch this to probabilistic mode
setTimeout.each(ack, function(id){
root.on('in', {'@': id, err: err, ok: 0}); // localStorage isn't reliable, so make its `ok` code be a low number.
},0,99);

View File

@ -81,6 +81,7 @@ function Mesh(root){
dup_track(id);
return;
}
if(tmp = msg.ok){ msg._.near = tmp['/'] }
var S = +new Date;
DBG && (DBG.is = S); peer.SI = id;
root.on('in', mesh.last = msg);
@ -126,12 +127,12 @@ function Mesh(root){
var DBG = msg.DBG, S = +new Date; meta.y = meta.y || S; if(!peer){ DBG && (DBG.y = S) }
if(!(id = msg['#'])){ id = msg['#'] = String.random(9) }
!loop && dup_track(id);//.it = it(msg); // track for 9 seconds, default. Earth<->Mars would need more! // always track, maybe move this to the 'after' logic if we split function.
if(msg.put && (msg.err || (dup.s[id]||'').err)){ return false } // TODO: in theory we should not be able to stun a message, but for now going to check if it can help network performance preventing invalid data to relay.
//if(msg.put && (msg.err || (dup.s[id]||'').err)){ return false } // TODO: in theory we should not be able to stun a message, but for now going to check if it can help network performance preventing invalid data to relay.
if(!(hash = msg['##']) && u !== msg.put && !meta.via && ack){ mesh.hash(msg, peer); return } // TODO: Should broadcasts be hashed?
if(!peer && ack){ peer = ((tmp = dup.s[ack]) && (tmp.via || ((tmp = tmp.it) && (tmp = tmp._) && tmp.via))) || ((tmp = mesh.last) && ack === tmp['#'] && mesh.leap) } // warning! mesh.leap could be buggy! mesh last check reduces this.
if(!peer && ack){ // still no peer, then ack daisy chain lost.
if(dup.s[ack]){ return } // in dups but no peer hints that this was ack to self, ignore.
console.STAT && console.STAT(+new Date, ++SMIA, 'total no peer to ack to');
if(!peer && ack){ // still no peer, then ack daisy chain 'tunnel' got lost.
if(dup.s[ack]){ return } // in dups but no peer hints that this was ack to ourself, ignore.
console.STAT && console.STAT(+new Date, ++SMIA, 'total no peer to ack to'); // TODO: Delete this now. Dropping lost ACKs is protocol fine now.
return false;
} // TODO: Temporary? If ack via trace has been lost, acks will go to all peers, which trashes browser bandwidth. Not relaying the ack will force sender to ask for ack again. Note, this is technically wrong for mesh behavior.
if(!peer && mesh.way){ return mesh.way(msg) }
@ -148,7 +149,7 @@ function Mesh(root){
//Type.obj.map(peer || opt.peers, each); // in case peer is a peer list.
loop = 1; var wr = meta.raw; meta.raw = raw; // quick perf hack
var i = 0, p; while(i < 9 && (p = (pl||'')[i++])){
if(!(p = ps[p])){ continue }
if(!(p = ps[p] || (peer||'')[p])){ continue }
mesh.say(msg, p);
}
meta.raw = wr; loop = 0;
@ -199,7 +200,7 @@ function Mesh(root){
if(!tmp['##']){ tmp['##'] = hash } // if none, add our hash to ask so anyone we relay to can dedup. // NOTE: May only check against 1st ack chunk, 2nd+ won't know and still stream back to relaying peers which may then dedup. Any way to fix this wasted bandwidth? I guess force rate limiting breaking change, that asking peer has to ask for next lexical chunk.
}
}
if(!msg.dam){
if(!msg.dam && !msg['@']){
var i = 0, to = []; tmp = opt.peers;
for(var k in tmp){ var p = tmp[k]; // TODO: Make it up peers instead!
to.push(p.url || p.pid || p.id);
@ -207,6 +208,7 @@ function Mesh(root){
}
if(i > 1){ msg['><'] = to.join() } // TODO: BUG! This gets set regardless of peers sent to! Detect?
}
if(msg.put && (tmp = msg.ok)){ msg.ok = {'@':(tmp['@']||1)-1, '/': (tmp['/']==msg._.near)? mesh.near : tmp['/']}; }
if(put = meta.$put){
tmp = {}; Object.keys(msg).forEach(function(k){ tmp[k] = msg[k] });
tmp.put = ':])([:';
@ -253,18 +255,22 @@ function Mesh(root){
(peer.queue = peer.queue || []).push(raw);
}}
mesh.near = 0;
mesh.hi = function(peer){
var wire = peer.wire, tmp;
if(!wire){ mesh.wire((peer.length && {url: peer, id: peer}) || peer); return }
if(peer.id){
opt.peers[peer.url || peer.id] = peer;
} else {
tmp = peer.id = peer.id || String.random(9);
tmp = peer.id = peer.id || peer.url || String.random(9);
mesh.say({dam: '?', pid: root.opt.pid}, opt.peers[tmp] = peer);
delete dup.s[peer.last]; // IMPORTANT: see https://gun.eco/docs/DAM#self
}
peer.met = peer.met || +(new Date);
if(!wire.hied){ root.on(wire.hied = 'hi', peer) }
if(!peer.met){
mesh.near++;
peer.met = +(new Date);
root.on('hi', peer)
}
// @rogowski I need this here by default for now to fix go1dfish's bug
tmp = peer.queue; peer.queue = [];
setTimeout.each(tmp||[],function(msg){
@ -273,6 +279,8 @@ function Mesh(root){
//Type.obj.native && Type.obj.native(); // dirty place to check if other JS polluted.
}
mesh.bye = function(peer){
peer.met && --mesh.near;
delete peer.met;
root.on('bye', peer);
var tmp = +(new Date); tmp = (tmp - (peer.met||tmp));
mesh.bye.time = ((mesh.bye.time || tmp) + tmp) / 2;
@ -286,6 +294,13 @@ function Mesh(root){
mesh.say({dam: '?', pid: opt.pid, '@': msg['#']}, peer);
delete dup.s[peer.last]; // IMPORTANT: see https://gun.eco/docs/DAM#self
}
mesh.hear['mob'] = function(msg, peer){ // NOTE: AXE will overload this with better logic.
	// An overloaded peer told us to move along: drop it and dial one of the
	// alternatives it suggested, chosen uniformly at random.
	var offered = msg.peers;
	if(!offered){ return }
	var ids = Object.keys(offered);
	var pick = ids[Math.floor(Math.random() * ids.length)];
	if(!pick){ return }
	mesh.bye(peer);
	mesh.hi(pick);
}
root.on('create', function(root){
root.opt.pid = root.opt.pid || String.random(9);
@ -303,17 +318,22 @@ function Mesh(root){
var gets = {};
root.on('bye', function(peer, tmp){ this.to.next(peer);
if(tmp = console.STAT){ tmp.peers = (tmp.peers || 0) - 1; }
if(tmp = console.STAT){ tmp.peers = mesh.near; }
if(!(tmp = peer.url)){ return } gets[tmp] = true;
setTimeout(function(){ delete gets[tmp] },opt.lack || 9000);
});
root.on('hi', function(peer, tmp){ this.to.next(peer);
if(tmp = console.STAT){ tmp.peers = (tmp.peers || 0) + 1 }
if(!(tmp = peer.url) || !gets[tmp]){ return } delete gets[tmp];
if(tmp = console.STAT){ tmp.peers = mesh.near }
if(opt.super){ return } // temporary (?) until we have better fix/solution?
setTimeout.each(Object.keys(root.next), function(soul){ var node = root.next[soul]; // TODO: .keys( is slow
tmp = {}; tmp[soul] = root.graph[soul]; tmp = String.hash(tmp); // TODO: BUG! This is broken.
mesh.say({'##': tmp, get: {'#': soul}}, peer);
var souls = Object.keys(root.next||''); // TODO: .keys( is slow
if(souls.length > 9999 && !console.SUBS){ console.log(console.SUBS = "Warning: You have more than 10K live GETs, which might use more bandwidth than your screen can show - consider `.off()`.") }
setTimeout.each(souls, function(soul){ var node = root.next[soul];
if(opt.super || (node.ask||'')['']){ mesh.say({get: {'#': soul}}, peer); return }
setTimeout.each(Object.keys(node.ask||''), function(key){ if(!key){ return }
// is the lack of ## a !onion hint?
mesh.say({'##': String.hash((root.graph[soul]||'')[key]), get: {'#': soul, '.': key}}, peer);
// TODO: Switch this so Book could route?
})
});
});

View File

@ -101,9 +101,10 @@ function stun(as, id){
function ran(as){
if(as.err){ ran.end(as.stun, as.root); return } // move log handle here.
if(as.todo.length || as.end || !Object.empty(as.wait)){ return } as.end = 1;
//(as.retry = function(){ as.acks = 0;
var cat = (as.$.back(-1)._), root = cat.root, ask = cat.ask(function(ack){
root.on('ack', ack);
if(ack.err){ Gun.log(ack) }
if(ack.err && !ack.lack){ Gun.log(ack) }
if(++acks > (as.acks || 0)){ this.off() } // Adjustable ACKs! Only 1 by default.
if(!as.ack){ return }
as.ack(ack, this);
@ -114,7 +115,9 @@ function ran(as){
setTimeout.each(Object.keys(stun = stun.add||''), function(cb){ if(cb = stun[cb]){cb()} }); // resume the stunned reads // Any perf reasons to CPU schedule this .keys( ?
}).hatch = tmp; // this is not official yet ^
//console.log(1, "PUT", as.run, as.graph);
(as.via._).on('out', {put: as.out = as.graph, ok: as.ok || as.opt, opt: as.opt, '#': ask, _: tmp});
if(as.ack && !as.ok){ as.ok = as.acks || 9 } // TODO: In future! Remove this! This is just old API support.
(as.via._).on('out', {put: as.out = as.graph, ok: as.ok && {'@': as.ok+1}, opt: as.opt, '#': ask, _: tmp});
//})();
}; ran.end = function(stun,root){
stun.end = noop; // like with the earlier id, cheaper to make this flag a function so below callbacks do not have to do an extra type check.
if(stun.the.to === stun && stun === stun.the.last){ delete root.stun }

View File

@ -128,7 +128,7 @@ Gun.ask = require('./ask');
console.STAT && console.STAT(((DBG||ctx).Hf = +new Date), tmp, 'future');
return;
}
if(state < was){ /*old;*/ if(!ctx.miss){ return } } // but some chains have a cache miss that need to re-fire. // TODO: Improve in future. // for AXE this would reduce rebroadcast, but GUN does it on message forwarding.
if(state < was){ /*old;*/ if(true || !ctx.miss){ return } } // but some chains have a cache miss that need to re-fire. // TODO: Improve in future. // for AXE this would reduce rebroadcast, but GUN does it on message forwarding. // TURNS OUT CACHE MISS WAS NOT NEEDED FOR NEW CHAINS ANYMORE!!! DANGER DANGER DANGER, ALWAYS RETURN! (or am I missing something?)
if(!ctx.faith){ // TODO: BUG? Can this be used for cache miss as well? // Yes this was a bug, need to check cache miss for RAD tests, but should we care about the faith check now? Probably not.
if(state === was && (val === known || L(val) <= L(known))){ /*console.log("same");*/ /*same;*/ if(!ctx.miss){ return } } // same
}
@ -136,14 +136,19 @@ Gun.ask = require('./ask');
var aid = msg['#']+ctx.all++, id = {toString: function(){ return aid }, _: ctx}; id.toJSON = id.toString; // this *trick* makes it compatible between old & new versions.
root.dup.track(id)['#'] = msg['#']; // fixes new OK acks for RPC like RTC.
DBG && (DBG.ph = DBG.ph || +new Date);
root.on('put', {'#': id, '@': msg['@'], put: {'#': soul, '.': key, ':': val, '>': state}, _: ctx});
root.on('put', {'#': id, '@': msg['@'], put: {'#': soul, '.': key, ':': val, '>': state}, ok: msg.ok, _: ctx});
}
function map(msg){
var DBG; if(DBG = (msg._||'').DBG){ DBG.pa = +new Date; DBG.pm = DBG.pm || +new Date}
var eve = this, root = eve.as, graph = root.graph, ctx = msg._, put = msg.put, soul = put['#'], key = put['.'], val = put[':'], state = put['>'], id = msg['#'], tmp;
if((tmp = ctx.msg) && (tmp = tmp.put) && (tmp = tmp[soul])){ state_ify(tmp, key, state, val, soul) } // necessary! or else out messages do not get SEA transforms.
//var bytes = ((graph[soul]||'')[key]||'').length||1;
graph[soul] = state_ify(graph[soul], key, state, val, soul);
if(tmp = (root.next||'')[soul]){ tmp.on('in', msg) }
if(tmp = (root.next||'')[soul]){
//tmp.bytes = (tmp.bytes||0) + ((val||'').length||1) - bytes;
//if(tmp.bytes > 2**13){ Gun.log.once('byte-limit', "Note: In the future, GUN peers will enforce a ~4KB query limit. Please see https://gun.eco/docs/Page") }
tmp.on('in', msg)
}
fire(ctx);
eve.to.next(msg);
}
@ -163,11 +168,11 @@ Gun.ask = require('./ask');
CF(); // courtesy check;
}
function ack(msg){ // aggregate ACKs.
var id = msg['@'] || '', ctx;
var id = msg['@'] || '', ctx, ok, tmp;
if(!(ctx = id._)){
var dup = (dup = msg.$) && (dup = dup._) && (dup = dup.root) && (dup = dup.dup);
if(!(dup = dup.check(id))){ return }
msg['@'] = dup['#'] || msg['@'];
msg['@'] = dup['#'] || msg['@']; // This doesn't do anything anymore, backtrack it to something else?
return;
}
ctx.acks = (ctx.acks||0) + 1;
@ -175,13 +180,14 @@ Gun.ask = require('./ask');
msg['@'] = ctx['#'];
fire(ctx); // TODO: BUG? How it skips/stops propagation of msg if any 1 item is error, this would assume a whole batch/resync has same malicious intent.
}
ctx.ok = msg.ok || ctx.ok;
if(!ctx.stop && !ctx.crack){ ctx.crack = ctx.match && ctx.match.push(function(){back(ctx)}) } // handle synchronous acks. NOTE: If a storage peer ACKs synchronously then the PUT loop has not even counted up how many items need to be processed, so ctx.STOP flags this and adds only 1 callback to the end of the PUT loop.
back(ctx);
}
function back(ctx){
if(!ctx || !ctx.root){ return }
if(ctx.stun || ctx.acks !== ctx.all){ return }
ctx.root.on('in', {'@': ctx['#'], err: ctx.err, ok: ctx.err? u : {'':1}});
ctx.root.on('in', {'@': ctx['#'], err: ctx.err, ok: ctx.err? u : ctx.ok || {'':1}});
}
var ERR = "Error: Invalid graph!";

View File

@ -46,12 +46,18 @@ Object.keys = Object.keys || function(o){
for(var k in o){ if(has.call(o, k)){ l.push(k) } }
return l;
}
;(function(){ // max ~1ms or before stack overflow
var u, sT = setTimeout, l = 0, c = 0, sI = (typeof setImmediate !== ''+u && setImmediate) || sT; // queueMicrotask faster but blocks UI
sT.hold = sT.hold || 9;
sT.poll = sT.poll || function(f){ //f(); return; // for testing
if((sT.hold >= (+new Date - l)) && c++ < 3333){ f(); return }
sI(function(){ l = +new Date; f() },c=0)
;(function(){ // Cooperative scheduler shim: sT.poll runs callbacks inline while inside a time/count budget, then yields.
var u, sT = setTimeout, l = 0, c = 0
, sI = (typeof setImmediate !== ''+u && setImmediate) || (function(c,f){
// No setImmediate (browsers): a MessageChannel post fires a macrotask sooner than setTimeout's clamped ~4ms delay.
if(typeof MessageChannel == ''+u){ return sT } // no MessageChannel either — degrade to plain setTimeout.
(c = new MessageChannel()).port1.onmessage = function(e){ ''==e.data && f() }
// NOTE(review): only the latest `f` is retained before the port message arrives —
// assumes at most one pending callback at a time; verify against sT.poll's usage.
return function(q){ f=q;c.port2.postMessage('') }
}()), check = sT.check = sT.check || (typeof performance !== ''+u && performance)
|| {now: function(){ return +new Date }}; // clock source: performance.now() when available, else Date.
sT.hold = sT.hold || 9; // half a frame benchmarks faster than < 1ms?
sT.poll = sT.poll || function(f){
// Run inline while within `hold` ms of the last yield AND under 3333 consecutive inline calls;
// otherwise schedule via sI, recording the yield time and resetting the counter.
if((sT.hold >= (check.now() - l)) && c++ < 3333){ f(); return }
sI(function(){ l = check.now(); f() },c=0)
}
}());
;(function(){ // Too many polls block, this "threads" them in turns over a single thread in time.

View File

@ -25,10 +25,10 @@ Gun.on('opt', function(root){
var url = peer.url.replace(/^http/, 'ws');
var wire = peer.wire = new opt.WebSocket(url);
wire.onclose = function(){
opt.mesh.bye(peer);
reconnect(peer);
opt.mesh.bye(peer);
};
wire.onerror = function(error){
wire.onerror = function(err){
reconnect(peer);
};
wire.onopen = function(){
@ -39,7 +39,7 @@ Gun.on('opt', function(root){
opt.mesh.hear(msg.data || msg, peer);
};
return wire;
}catch(e){}}
}catch(e){ opt.mesh.bye(peer) }}
setTimeout(function(){ !opt.super && root.on('out', {dam:'hi'}) },1); // it can take a while to open a socket, so maybe no longer lazy load for perf reasons?