put debug (dbg)

Mark Nadal 2020-02-25 12:29:28 -08:00
parent 74f6638227
commit e5c06e7491
5 changed files with 53 additions and 27 deletions
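
This commit threads a per-message DBG object through the put path: each stage stamps a timestamp (or a count) onto it exactly once, so slow stages can be compared afterwards. The snippet below is only a minimal illustration of that idiom; the message shape, stage names, and everything outside the DBG && (DBG.x = +new Date) guard are simplified assumptions, not GUN internals.

// Sketch of the timing-marker idiom used throughout this commit.
// Only the guarded-stamp pattern is taken from the diff; the rest is illustrative.
function processPut(msg){
    var DBG = msg.DBG;            // usually absent in production, so every stamp is guarded
    DBG && (DBG.p = +new Date);   // put handling started
    // ... merge the put into the graph here ...
    DBG && (DBG.pe = +new Date);  // put handling ended
}
var msg = {'#': 'abc', put: {}, DBG: {}};
processPut(msg);
console.log('put took', msg.DBG.pe - msg.DBG.p, 'ms');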

2
axe.js
View File

@@ -69,7 +69,7 @@
The mob threshold might be determined by other factors,
like how much RAM or CPU stress we have.
*/
opt.mob = opt.mob || Infinity;
opt.mob = opt.mob || 9876 || Infinity;
var mesh = opt.mesh = opt.mesh || Gun.Mesh(at);
console.log("AXE enabled.");
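
The axe.js change pins the mob threshold to 9876 instead of leaving it unbounded (the trailing || Infinity can no longer be reached). The comment above suggests the cap could instead track RAM or CPU pressure; the sketch below is a hypothetical way to do that and is not part of this commit. os.freemem(), the 0.1 MB-per-peer figure, and mobCap are assumptions.

// Hypothetical only: derive the peer cap from free memory rather than hard-coding it.
var os = require('os');
function mobCap(){
    var freeMB = os.freemem() / (1024 * 1024);
    // assume very roughly 0.1 MB of bookkeeping per connected peer (illustrative number)
    return Math.max(100, Math.floor(freeMB / 0.1));
}
// e.g. opt.mob = opt.mob || mobCap();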

14
gun.js
View File

@@ -719,6 +719,7 @@
ctx.out = msg;
ctx.lot = {s: 0, more: 1};
var S = +new Date;
DBG && (DBG.p = S);
for(var soul in put){ // Gun.obj.native() makes this safe.
var node = put[soul], states;
if(!node){ err = ERR+cut(soul)+"no node."; break }
@@ -734,7 +735,7 @@
}
if(err){ break }
}
DBG && (DBG.p = +new Date);
DBG && (DBG.pe = +new Date);
if(console.STAT){ console.STAT(S, +new Date - S, 'mix');console.STAT(S, ctx.lot.s, 'mix #') }
if(ctx.err = err){ root.on('in', {'@': id, err: Gun.log(err)}); return }
if(!(--ctx.lot.more)){ fire(ctx) } // if synchronous.
@@ -767,10 +768,8 @@
if((tmp = ctx.out) && (tmp = tmp.put)){
tmp[soul] = state_ify(tmp[soul], key, state, val, soul); // TODO: Hacky, fix & come back later, for actual pushing messages.
}
(msg._||'').DBG && ((msg._||'').DBG.cm = +new Date);
if(!(--ctx.lot.more)){ fire(ctx) }
eve.to.next(msg);
(msg._||'').DBG && ((msg._||'').DBG.ce = +new Date);
}
function chain(ctx, soul, key,val, state){
var root = ctx.root, put, tmp;
@@ -2092,9 +2091,8 @@
}
var S = +new Date, ST;
DBG && (DBG.is = S);
//root.on('in', msg);
ECHO = msg.put || ECHO;
(msg.ok !== -3740) && mesh.say({ok: -3740, put: ECHO, '@': msg['#']}, peer);
root.on('in', msg);
//ECHO = msg.put || ECHO; !(msg.ok !== -3740) && mesh.say({ok: -3740, put: ECHO, '@': msg['#']}, peer);
DBG && (DBG.hd = +new Date);
console.STAT && (ST = +new Date - S) > 9 && console.STAT(S, ST, 'msg');
dup_track(id).via = peer;
@@ -2104,7 +2102,6 @@
var tomap = function(k,i,m){m(k,true)};
var noop = function(){};
hear.c = hear.d = 0;
var ECHO;
;(function(){
var SMIA = 0;
@@ -2114,7 +2111,8 @@
if(this.to){ this.to.next(msg) } // compatible with middleware adapters.
if(!msg){ return false }
var id, hash, tmp, raw;
var S = +new Date; //msg.DBG_s = msg.DBG_s || +new Date;
var S = +new Date, DBG = msg.DBG;
DBG && (DBG.y = +new Date);
var meta = msg._||(msg._=function(){});
if(!(id = msg['#'])){ id = msg['#'] = Type.text.random(9) }
//if(!(hash = msg['##']) && u !== msg.put){ hash = msg['##'] = Type.obj.hash(msg.put) }

View File

@@ -40,7 +40,7 @@
1. Because writing to disk takes time, we should batch data to disk. This improves performance, and reduces potential disk corruption.
2. If a batch exceeds a certain number of writes, we should immediately write to disk when physically possible. This caps total performance, but reduces potential loss.
*/
var r = function(key, data, cb, tag){
var r = function(key, data, cb, tag, DBG){
if('function' === typeof data){
var o = cb || {};
cb = data;
@@ -49,16 +49,20 @@
}
//var tmp = (tmp = r.batch = r.batch || {})[key] = tmp[key] || {};
//var tmp = (tmp = r.batch = r.batch || {})[key] = data;
r.save(key, data, cb, tag);
r.save(key, data, cb, tag, DBG);
}
r.save = function(key, data, cb, tag){
r.save = function(key, data, cb, tag, DBG){
var s = {key: key}, tags, f, d, q;
s.find = function(file){ var tmp;
s.file = file || (file = opt.code.from);
DBG && (DBG = DBG[file] = DBG[file] || {});
DBG && (DBG.sf = DBG.sf || +new Date);
if(tmp = r.disk[file]){ s.mix(u, tmp); return }
r.parse(file, s.mix);
r.parse(file, s.mix, null, DBG);
}
s.mix = function(err, disk){
DBG && (DBG.sml = +new Date);
DBG && (DBG.sm = DBG.sm || +new Date);
if(s.err = err || s.err){ cb(err); return } // TODO: HANDLE BATCH EMIT
var file = s.file = (disk||'').file || s.file, tmp;
if(!disk && file !== opt.code.from){ // corrupt file?
@@ -76,19 +80,23 @@
(tmp = (tmp = disk.tags || (disk.tags = {}))[tag] || (tmp[tag] = r.tags[tag] || (r.tags[tag] = {})))[file] || (tmp[file] = r.one[tag] || (r.one[tag] = cb));
cb = null;
}
DBG && (DBG.st = DBG.st || +new Date);
if(disk.Q){ cb && disk.Q.push(cb); return } disk.Q = (cb? [cb] : []);
disk.to = setTimeout(s.write, opt.until);
}
s.write = function(){
DBG && (DBG.sto = DBG.sto || +new Date);
var file = f = s.file, disk = d = s.disk;
q = s.q = disk.Q;
tags = s.tags = disk.tags;
delete disk.Q;
delete r.disk[file];
delete disk.tags;
r.write(file, disk, s.ack);
r.write(file, disk, s.ack, null, DBG);
}
s.ack = function puff(err, ok){
DBG && (DBG.sa = DBG.sa || +new Date);
DBG && (DBG.sal = q.length);
var ack, tmp;
for(var id in r.tags){
if(!r.tags.hasOwnProperty(id)){ continue } var tag = r.tags[id];
@@ -113,9 +121,9 @@
console.STAT && console.STAT(S, +new Date - S, 'rad acks', ename(s.file));
console.STAT && console.STAT(S, q.length, 'rad acks #', ename(s.file));
}
cb || (cb = function(err, ok){
cb || (cb = function(err, ok){ // test delete!
if(!err){ return }
})
});
r.find(key, s.find);
}
r.disk = {};
@@ -129,7 +137,7 @@
the question is just how long you delay it.
*/
var RWC = 0;
r.write = function(file, rad, cb, o){
r.write = function(file, rad, cb, o, DBG){
if(!rad){ cb('No radix!'); return }
o = ('object' == typeof o)? o : {force: o};
var f = function Fractal(){}, a, b;
@@ -140,9 +148,12 @@
var text = rad.raw = f.text;
r.disk[file = rad.file || f.file || file] = rad;
var S = +new Date;
DBG && (DBG.wd = S);
r.find.add(file, function add(err){
DBG && (DBG.wa = +new Date);
if(err){ cb(err); return }
opt.store.put(ename(file), text, function safe(err, ok){
DBG && (DBG.wp = +new Date);
console.STAT && console.STAT(S, ST = +new Date - S, "wrote disk", JSON.stringify(file), ++RWC, 'total all writes.');
cb(err, ok || 1);
if(!rad.Q){ delete r.disk[file] } // VERY IMPORTANT! Clean up memory, but not if there is already queued writes on it!
@@ -151,19 +162,26 @@
}
f.split = function(){
var S = +new Date;
DBG && (DBG.wf = S);
f.text = '';
if(!f.count){ f.count = 0;
Radix.map(rad, function count(){ f.count++ }); // TODO: Perf? Any faster way to get total length?
}
DBG && (DBG.wfc = f.count);
f.limit = Math.ceil(f.count/2);
var SC = f.count;
f.count = 0;
DBG && (DBG.wf1 = +new Date);
f.sub = Radix();
Radix.map(rad, f.slice, {reverse: 1}); // IMPORTANT: DO THIS IN REVERSE, SO LAST HALF OF DATA MOVED TO NEW FILE BEFORE DROPPING FROM CURRENT FILE.
DBG && (DBG.wf2 = +new Date);
r.write(f.end, f.sub, f.both, o);
DBG && (DBG.wf3 = +new Date);
f.hub = Radix();
Radix.map(rad, f.stop);
DBG && (DBG.wf4 = +new Date);
r.write(rad.file, f.hub, f.both, o);
DBG && (DBG.wf5 = +new Date);
console.STAT && console.STAT(S, +new Date - S, "rad split", ename(rad.file), SC);
return true;
}
@@ -176,6 +194,7 @@
f.hub(key, val);
}
f.both = function(err, ok){
DBG && (DBG.wfd = +new Date);
if(b){ cb(err || b); return }
if(a){ cb(err, ok); return }
a = true;
@@ -190,15 +209,17 @@
}
f.text += enc;
}
if(opt.jsonify){ r.write.jsonify(f, rad, cb, o); return } // temporary testing idea
if(opt.jsonify){ r.write.jsonify(f, rad, cb, o, DBG); return } // temporary testing idea
if(!Radix.map(rad, f.each, true)){ f.write() }
}
r.write.jsonify = function(f, rad, cb, o){
r.write.jsonify = function(f, rad, cb, o, DBG){
var raw;
var S = +new Date;
DBG && (DBG.w = S);
try{raw = JSON.stringify(rad.$);
}catch(e){ cb("Cannot radisk!"); return }
DBG && (DBG.ws = +new Date);
console.STAT && console.STAT(S, +new Date - S, "rad stringified JSON");
if(opt.chunk < raw.length && !o.force){
var c = 0;
@@ -298,12 +319,13 @@
*/
var RPC = 0;
var Q = {}, s = String.fromCharCode(31);
r.parse = function(file, cb, raw){ var q;
r.parse = function(file, cb, raw, DBG){ var q;
if(!file){ return cb(); }
if(q = Q[file]){ q.push(cb); return } q = Q[file] = [cb];
var p = function Parse(){}, info = {file: file};
(p.disk = Radix()).file = file;
p.read = function(err, data){ var tmp;
DBG && (DBG.rpg = +new Date);
console.STAT && console.STAT(S, +new Date - S, 'read disk', JSON.stringify(file), ++RPC, 'total all parses.');
delete Q[file];
if((p.err = err) || (p.not = !data)){ p.map(q, p.ack); return }
@@ -318,12 +340,15 @@
if(p.err){ p.map(q, p.ack); return }
}
info.parsed = data.length;
DBG && (DBG.rpl = info.parsed);
DBG && (DBG.rpa = q.length);
S = +new Date;
if(opt.jsonify || '{' === data[0]){
try{
var json = JSON.parse(data); // TODO: this caused a out-of-memory crash!
p.disk.$ = json;
console.STAT && (ST = +new Date - S) > 9 && console.STAT(S, ST, 'rad parsed JSON');
DBG && (DBG.rpd = +new Date);
p.map(q, p.ack);
return;
}catch(e){ tmp = e }
@@ -396,8 +421,9 @@
l[2] = t.slice(i + o.i);
return l;
}
var S = +new Date;
if(r.disk){ raw || (raw = (r.disk[file]||'').raw) }
var S = +new Date;
DBG && (DBG.rp = S);
if(raw){ return p.read(u, raw) }
opt.store.get(ename(file), p.read);
// TODO: What if memory disk gets filled with updates, and we get an old one back?
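
The comment at the top of this file's diff describes the batching strategy: writes to the same file are delayed and flushed together, and a batch that grows too large is written out as soon as possible, which is also what the "how long you delay it" note further down refers to. The sketch below restates that idea in isolation; save, flush, opts.max, and opts.until are illustrative names, not radisk's actual internals (in the real code the pending callbacks queue on disk.Q, the per-file state lives in r.disk, and the delay is opt.until).

// Self-contained sketch of debounced, capped write batching (Node.js, illustrative names).
var fs = require('fs');
var batches = {};
function save(file, key, val, ack, opts){
    var b = batches[file] || (batches[file] = {data: {}, acks: [], timer: null});
    b.data[key] = val;                                    // merge this write into the pending batch
    ack && b.acks.push(ack);
    if(b.acks.length >= opts.max){ flush(file); return }  // cap hit: write immediately
    b.timer || (b.timer = setTimeout(function(){ flush(file) }, opts.until)); // else debounce
}
function flush(file){
    var b = batches[file]; if(!b){ return }
    delete batches[file];
    clearTimeout(b.timer);
    fs.writeFile(file, JSON.stringify(b.data), function(err){
        b.acks.forEach(function(cb){ cb(err, !err) });    // ack every queued write at once
    });
}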

View File

@@ -9,7 +9,7 @@
root.opt.log = root.opt.log || Gun.log;
this.to.next(root);
})
//require('../nts');
require('../nts');
require('./store');
require('./rs3');
require('./wire');
@@ -17,8 +17,7 @@
try{require('../axe');}catch(e){}
//require('./file');
require('./evict');
//require('./multicast');
require('./multicast');
require('./stats');
if('debug' === process.env.GUN_ENV){ require('./debug') }
module.exports = Gun;
}());

View File

@@ -15,14 +15,17 @@ Gun.on('create', function(root){
root.on('put', function(msg){
this.to.next(msg);
var id = msg['#'], put = msg.put, soul = put['#'], key = put['.'], val = put[':'], state = put['>'], tmp;
var DBG = (msg._||'').DBG; DBG && (DBG.sp = DBG.sp || +new Date);
var lot = (msg._||'').lot||''; count[id] = (count[id] || 0) + 1;
var S = (msg._||'').RPS || ((msg._||'').RPS = +new Date);
dare(soul+esc+key, {':': val, '>': state}, dare.one[id] || function(err, ok){
DBG && (DBG.spd = DBG.spd || +new Date);
console.STAT && console.STAT(S, +new Date - S, 'put');
if(!err && count[id] !== lot.s){ console.log(err = "Disk count not same as ram count."); console.STAT && console.STAT(+new Date, lot.s - count[id], 'put ack != count') } delete count[id];
if(err){ root.on('in', {'@': id, err: err}); return }
root.on('in', {'@': id, ok: ok});
}, id);
if(err){ root.on('in', {'@': id, err: err, DBG: DBG}); return }
root.on('in', {'@': id, ok: ok, DBG: DBG});
}, id, (DBG.r = DBG.r || {}));
DBG && (DBG.sps = DBG.sps || +new Date);
});
var count = {}, obj_empty = Gun.obj.empty;