v0.9.x ! Try S3 again?

This commit is contained in:
Mark Nadal 2017-11-18 20:33:15 -08:00
parent c987e42820
commit 95280b89f8
15 changed files with 66 additions and 295 deletions

2
gun.js
View File

@ -832,8 +832,6 @@
if(typeof window !== "undefined"){ window.Gun = Gun }
if(typeof common !== "undefined"){ common.exports = Gun }
module.exports = Gun;
Gun.log.once("0.8", "0.8 WARNING! Breaking changes, test that your app works before upgrading! The adapter interface has been upgraded (non-default storage and transport layers probably won't work). Also, `.path()` and `.not()` are outside core and now in 'lib/'.");
})(require, './root');
;require(function(module){

2
gun.min.js vendored

File diff suppressed because one or more lines are too long

View File

@ -10,6 +10,7 @@ Gun.on('opt', function(ctx){
this.to.next(ctx);
var opt = ctx.opt;
if(ctx.once){ return }
if(process.env.AWS_S3_BUCKET){ return }
opt.file = String(opt.file || 'data.json');
var graph = ctx.graph, acks = {}, count = 0, to;
var disk = Gun.obj.ify((fs.existsSync || require('path').existsSync)(opt.file)?

View File

@ -177,7 +177,6 @@ function Radisk(opt){
// TODO: BUG!!! If a node spans multiple file chunks, it won't return all!
// TODO: BUG!!! If a node spans multiple file chunks, it won't return all!
// TODO: BUG!!! If a node spans multiple file chunks, it won't return all!
var id = Gun.text.random(3);
if(RAD){ // cache
var val = RAD(key);
if(u !== val){
@ -188,10 +187,10 @@ function Radisk(opt){
g.lex = function(file){
if(!file || file > key){
if(tmp = q[g.file]){
tmp.push(cb);
tmp.push({key: key, ack: cb});
return true;
}
q[g.file] = [cb];
q[g.file] = [{key: key, ack: cb}];
r.parse(g.file, g.it);
return true;
}
@ -199,15 +198,13 @@ function Radisk(opt){
}
g.it = function(err, disk){
if(g.err = err){ Gun.log(err) }
if(disk){
RAD = disk;
g.val = disk(key);
}
if(disk){ RAD = disk }
disk = q[g.file]; Gun.obj.del(q, g.file);
Gun.obj.map(disk, g.ack);
}
g.ack = function(ack){
ack(g.err, g.val);
g.ack = function(as){
if(!as.ack){ return }
as.ack(g.err, RAD(as.key));
}
opt.store.list(g.lex);
}

View File

@ -1,242 +0,0 @@
var fs = require('fs');
var Gun = require('../gun');
var Radix = require('./radix');

// Radisk: a storage engine that batches key/value writes into an in-memory
// Radix tree and periodically flushes ("thrashes") them into chunked,
// indentation-encoded text files under a single directory.
function Radisk(opt){
	/*
	Any and all storage adapters should...
	1. If not busy, write to disk immediately.
	2. If busy, batch to disk. (Improves performance, reduces potential disk corruption)
	3. If a batch exceeds a certain number of writes, force atomic batch to disk. (This caps total performance, but reduces potential loss)
	*/
	// radisk(key, val, cb) queues a write; radisk(key, cb) performs a read.
	var radisk = function(key, val, cb){
		key = ''+key;
		if(0 <= key.indexOf('_') || 0 <= key.indexOf('$')){ // TODO: BUG! Fix!
			var err = "ERROR: Radix and Radisk not tested against _ or $ keys!";
			console.log(err);
			cb = cb || val; // 2-arg read form: the callback arrived in `val`
			if(cb instanceof Function){ cb(err) }
			return;
		}
		if(val instanceof Function){ // read path: (key, cb)
			cb = val;
			// First consult the live in-memory batch...
			val = radisk.batch(key);
			if(u !== val){
				return cb(null, val);
			}
			// ...then the batch currently being flushed, if one is in flight.
			if(radisk.was){
				val = radisk.was(key);
				if(u !== val){
					return cb(null, val);
				}
			}
			console.log("READ FROM DISK");
			// NOTE(review): no disk read actually happens here — `val` is still
			// undefined at this point, so cb receives (null, undefined). The
			// replacement lib/radisk.js adds the real disk lookup.
			return cb(null, val);
		}
		// Write path: record in the batch and remember the ack callback.
		radisk.batch(key, val);
		if(cb){ radisk.batch.acks.push(cb) }
		if(!count++){ return thrash() } // (1)
		if(opt.batch <= count){ return thrash() } // (3)
		clearTimeout(to); // (2)
		to = setTimeout(thrash, opt.wait);
	};
	radisk.batch = Radix();
	radisk.batch.acks = [];
	var count = 0, wait, to, u; // `u` is deliberately left undefined (sentinel).
	opt = opt || {};
	opt.file = String(opt.file || 'radata'); // directory holding the chunk files
	opt.size = opt.size || (1024 * 1024 * 10); // 10MB
	opt.batch = opt.batch || 10 * 1000;
	opt.wait = opt.wait || 1;
	opt.nest = opt.nest || ' '; // one indent unit per radix depth in the file format
	console.log("Warning: Radix storage engine has not been tested with all types of values and keys yet.");
	if(!fs.existsSync(opt.file)){ fs.mkdirSync(opt.file) }
	// Flush the current batch to disk; re-runs itself if writes arrived meanwhile.
	var thrash = function(){
		if(wait){ return } // a flush is already in flight
		clearTimeout(to);
		wait = true;
		// Swap the live batch out; reads consult `radisk.was` while flushing.
		var was = radisk.was = radisk.batch;
		radisk.batch = null;
		radisk.batch = Radix();
		radisk.batch.acks = [];
		chunk(radisk.was, function(err, ok){
			radisk.was = null;
			wait = false;
			var tmp = count;
			count = 0;
			// Ack every write that was part of this flush.
			Gun.obj.map(was.acks, function(cb){cb(err, ok)});
			if(1 < tmp){ thrash() } // writes arrived during the flush: go again
		});
	}
	/*
	1. Find the first radix item in memory.
	2. Use that as the starting index in the directory of files.
	3. Find the first file that is lexically larger than it,
	4. Read the previous file to that into memory
	5. Scan through the in memory radix for all values lexically less than the limit.
	6. Merge and write all of those to the in-memory file and back to disk.
	7. If file too large, split. More details needed here.
	*/
	function chunk(radix, cb){
		var step = {
			// Take the first key >= step.start as the new starting point.
			check: function(tree, key){
				if(key < step.start){ return }
				step.start = key;
				fs.readdir(opt.file, step.match);
				return true; // truthy presumably halts the Radix.map scan — confirm
			},
			// Find which existing chunk file the starting key belongs in.
			match: function(err, dir){
				step.dir = dir;
				if(!dir.length){ // empty directory: begin a fresh chunk named '0'
					step.file = '0';
					return step.merge(null, Radix());
				}
				Gun.obj.map(dir, step.lex);
				// NOTE(review): if every file sorts after step.start, step.file
				// is still undefined here — verify read() tolerates that.
				read(step.file, step.merge);
			},
			// Track the last file <= start (step.file) and the first one after (step.end).
			lex: function(file){
				if(file > step.start){
					return step.end = file; // truthy return to stop the directory scan
				}
				step.file = file;
			},
			// Merge the in-memory batch into the chunk loaded from disk, then persist.
			merge: function(err, disk){
				if(err){ return console.log("ERROR!!!", err) }
				step.disk = disk;
				Radix.map(radix, step.add);
				write(step.file, step.disk, step.done);
			},
			// Copy one batched entry into this chunk, stopping at the next file's range.
			add: function(val, key){
				if(key < step.start){ return }
				if(step.end && step.end < key){ return step.next = key; }
				step.disk(key, val);
			},
			// If entries spilled past this chunk, continue with the next one.
			done: function(err){
				if(err){ console.log("ERROR!!!", err) }
				if(!step.next){
					return cb(err);
				}
				step.start = step.next;
				step.end = step.next = step.file = u;
				Radix.map(radix, step.check);
			}
		}
		Radix.map(radix, step.check);
	}
	/*
	Any storage engine at some point will have to do a read in order to write.
	This is true of even systems that use an append only log, if they support updates.
	Therefore it is unavoidable that a read will have to happen,
	the question is just how long you delay it.
	*/
	// Serialize a radix chunk to text and write it to its file, splitting the
	// chunk roughly in half whenever the serialized form exceeds opt.size.
	var write = function(file, radix, cb){
		var step = {
			rest: "",
			count: 0,
			file: file,
			// Append one entry; if the text grew too big, abort and enter split mode.
			each: function(val, key, k, pre){
				step.count++;
				if(opt.size < step.rest.length){
					step.rest = "";
					step.limit = Math.ceil(step.count/2); // keep about half in this file
					step.count = 0;
					step.sub = Radix();
					Radix.map(radix, step.slice);
					return true; // abort the serializing pass
				}
				var i = pre.length;
				while(i--){ step.rest += opt.nest }; // radix depth encoded as indentation
				step.rest += encode(k) + (u === val? '' : '=' + encode(val)) + '\n';
			},
			// Whole chunk fit in one file: persist the serialized text.
			dump: function(){
				var rest = step.rest;
				step.rest = "";
				fs.writeFile(opt.file +'/'+ file, rest, cb);
				console.log("DUMP");
				// Optional mirror hook (e.g. S3) receives the same payload.
				if(opt.disk){ opt.disk(opt.file+'/'+file, rest, cb) }
			},
			// Split mode: gather the first `limit` entries into a sub-chunk, then
			// restart with a new file named after the key where we stopped.
			slice: function(val, key){
				if(key < step.file){ return }
				if(step.limit < (++step.count)){
					var name = step.file;
					step.file = key; // the remainder will live under this new file name
					step.count = 0;
					write(name, step.sub, step.next);
					return true;
				}
				step.sub(key, val);
			},
			// Recurse until the remainder fits in a single file.
			next: function(err){
				if(err){ console.log("ERR!!!!") }
				step.sub = Radix();
				if(!Radix.map(radix, step.slice)){
					write(step.file, step.sub, cb);
				}
			}
		};
		if(!Radix.map(radix, step.each, true)){ step.dump() }
	}
	// Values are wrapped in ASCII 31 (unit separator) and JSON-escaped.
	var split = String.fromCharCode(31), esc = JSON.stringify, fix = JSON.parse;
	function encode(data){
		return split + esc(data) + split;
	}
	// Extract and JSON-parse the first separator-delimited token; '' on failure.
	function decode(data){
		if(!data){ return '' }
		var i = -1, c, f, t = '';
		while(c = data[++i]){
			if(f){ // inside the delimiters: accumulate until the closing separator
				if(split === c){
					try{ t = fix(t);
					}catch(e){ t = '' }
					return t;
				}
				t += c;
			} else
			if(split === c){ // found the opening separator
				f = true;
			}
		}
		return '';
	}
	/*
	Let us start by assuming we are the only process that is
	changing the directory or bucket. Not because we do not want
	to be multi-process/machine, but because we want to experiment
	with how much performance and scale we can get out of only one.
	Then we can work on the harder problem of being multi-process.
	*/
	// Read one chunk file back into a Radix tree and hand it to cb(null, rad).
	var read = function(file, cb){
		var step = {
			nest: 0,
			rad: Radix(),
			data: function(err, data){
				if(err){ return console.log("ERROR READING FILE!", err) }
				step.pre = [];
				Gun.obj.map(data.toString().split('\n'), step.split); // TODO: Escape!
				cb(null, step.rad);
			},
			// Rebuild the key-prefix stack from each line's indentation depth.
			split: function(line){ var LINE = line;
				var nest = -1; while(opt.nest === line[++nest]){};
				if(nest){ line = line.slice(nest) }
				if(nest <= step.nest){ step.pre = step.pre.slice(0, nest - step.nest - 1) }
				line = line.split('='); step.pre.push(line[0]);
				if(1 < line.length){ step.rad(step.pre.join(''), line[1]) }
				step.nest = nest;
			}
		}
		fs.readFile(opt.file +'/'+ file, step.data);
	}
	radisk.read = read;
	return radisk;
}
module.exports = Radisk;

View File

@ -12,14 +12,16 @@ var Gun = require('../gun');
k += key[++i];
}
if(!at){
if(u === val && i == l){ return }
if(!map(t, function(r, s){
var ii = 0, kk = '';
while(s[ii] == key[ii]){
kk += s[ii++];
}
if(kk){
if(u === val){ return (tmp || (tmp = {}))[s.slice(ii)] = r; }
if(u === val){
if(ii <= l){ return }
return (tmp || (tmp = {}))[s.slice(ii)] = r;
}
var __ = {};
__[s.slice(ii)] = r;
(__[key.slice(ii)] = {})[$] = val;
@ -30,7 +32,7 @@ var Gun = require('../gun');
})){
if(u === val){ return; }
(t[k] || (t[k] = {}))[$] = val;
} else
}
if(u === val){
return tmp;
}
@ -65,4 +67,4 @@ var Gun = require('../gun');
Object.keys = Object.keys || function(o){ return map(o, function(v,k,t){t(k)}) }
module.exports = Radix;
}());
}());

View File

@ -10,8 +10,8 @@ Gun.on('opt', function(ctx){
var opt = ctx.opt;
if(ctx.once){ return }
if(!process.env.AWS_S3_BUCKET){ return }
opt.batch = opt.batch || 10 * 1000;
opt.wait = opt.wait || 1000 * 15;
opt.batch = opt.batch || (1000 * 10);
opt.wait = opt.wait || (1000 * 15);
opt.size = opt.size || (1024 * 1024 * 10); // 10MB
var opts = opt.s3 || (opt.s3 = {});
@ -43,12 +43,11 @@ function Store(opt){
s3.putObject(params, cb);
};
store.get = function(file, cb){
var params = {Bucket: opts.bucket, Key: file};
var params = {Bucket: opts.bucket, Key: file||''};
s3.getObject(params, function(err, ack){
if(!ack){ return cb(err) }
var data = ack.Body;
if(!ack){ return cb(null) }
var data = (ack||{}).Body;
if(data){ data = data.toString() }
console.log("HERE WE GO!", data);
cb(err, data);
});
};
@ -61,7 +60,6 @@ function Store(opt){
})){ return }
if(!data.IsTruncated){ return cb() } // Stream interface requires a final call to know when to be done.
params.ContinuationToken = data.NextContinuationToken;
console.log("get further list...");
store.list(cb, match, params);
});
};

View File

@ -2,7 +2,8 @@
var Gun = require('../gun');
Gun.serve = require('./serve');
require('../nts');
require('./s3');
require('./store');
require('./rs3');
try{require('./ws');}catch(e){require('./wsp/server');}
require('./verify');
require('./file');

View File

@ -1,4 +1,4 @@
var Gun = require('gun/gun');
var Gun = require('../gun');
var Radisk = require('./radisk');
var fs = require('fs');
var Radix = Radisk.Radix;
@ -8,6 +8,7 @@ Gun.on('opt', function(ctx){
this.to.next(ctx);
var opt = ctx.opt;
if(ctx.once){ return }
if(!process.env.AWS_S3_BUCKET){ return } // TODO: Remove this after migration.
opt.store = opt.store || Store(opt);
var rad = Radisk(opt);
@ -88,13 +89,17 @@ function Store(opt){
module.exports = Store;
;(function(){//return;
global.Gun = require('../gun');
/*process.env.AWS_S3_BUCKET = 'test-s3';
;(function(){return;
process.env.AWS_S3_BUCKET = 'test-s3';
process.env.AWS_ACCESS_KEY_ID = 'asdf';
process.env.AWS_SECRET_ACCESS_KEY = 'fdsa';
process.env.fakes3 = 'http://localhost:4567';
process.env.AWS_S3_THROTTLE = 0;*/
process.env.AWS_S3_THROTTLE = 0;
return;
global.Gun = require('../gun');
require('./rs3');
require('../test/abc');
}());

22
package-lock.json generated
View File

@ -1,6 +1,6 @@
{
"name": "gun",
"version": "0.8.8",
"version": "0.9.0",
"lockfileVersion": 1,
"requires": true,
"dependencies": {
@ -16,15 +16,15 @@
"integrity": "sha1-Q96AKTD4JvxZj/73VtqBR42GEaM=",
"dev": true,
"requires": {
"asn1.js": "4.9.1",
"asn1.js": "4.9.2",
"base64url": "2.0.0",
"elliptic": "6.4.0"
}
},
"@trust/webcrypto": {
"version": "0.5.0",
"resolved": "https://registry.npmjs.org/@trust/webcrypto/-/webcrypto-0.5.0.tgz",
"integrity": "sha1-XURt2vp1WIJDVuhSQqMdlGj9ayE=",
"version": "0.7.1",
"resolved": "https://registry.npmjs.org/@trust/webcrypto/-/webcrypto-0.7.1.tgz",
"integrity": "sha512-aix+LOG/3Ku3MzClfVxVH88QbSdIL1HcBQ+gjXL/VnX05uyORf28CaQZOvsoEcCzGnWIVBUNwE2gxLBapWANWw==",
"dev": true,
"requires": {
"@trust/keyto": "0.3.1",
@ -74,9 +74,9 @@
"dev": true
},
"asn1.js": {
"version": "4.9.1",
"resolved": "https://registry.npmjs.org/asn1.js/-/asn1.js-4.9.1.tgz",
"integrity": "sha1-SLokC0WpKA6UdImQull9IWYX/UA=",
"version": "4.9.2",
"resolved": "https://registry.npmjs.org/asn1.js/-/asn1.js-4.9.2.tgz",
"integrity": "sha512-b/OsSjvWEo8Pi8H0zsDd2P6Uqo2TK2pH8gNLSJtNLM2Db0v2QaAZ0pBQJXVjAn4gBuugeVDr7s63ZogpUIwWDg==",
"dev": true,
"requires": {
"bn.js": "4.11.8",
@ -85,9 +85,9 @@
}
},
"aws-sdk": {
"version": "2.133.0",
"resolved": "https://registry.npmjs.org/aws-sdk/-/aws-sdk-2.133.0.tgz",
"integrity": "sha1-am2JiL01kowRu9PbZ3rVs+rnEHo=",
"version": "2.153.0",
"resolved": "https://registry.npmjs.org/aws-sdk/-/aws-sdk-2.153.0.tgz",
"integrity": "sha1-a6IuhJruFq1wdxihjGS3Vtjx6Bo=",
"requires": {
"buffer": "4.9.1",
"crypto-browserify": "1.0.9",

View File

@ -1,6 +1,6 @@
{
"name": "gun",
"version": "0.8.9",
"version": "0.9.0",
"description": "Graph engine",
"main": "index.js",
"browser": "gun.min.js",
@ -45,10 +45,10 @@
},
"homepage": "https://github.com/amark/gun#readme",
"engines": {
"node": ">=0.6.6"
"node": ">=0.8.4"
},
"dependencies": {
"aws-sdk": ">=2.86.0",
"aws-sdk": ">=2.153.0",
"ws": "~>2.2.3"
},
"devDependencies": {

View File

@ -191,6 +191,4 @@ Gun.log.once("welcome", "Hello wonderful person! :) Thanks for using GUN, feel f
if(typeof window !== "undefined"){ window.Gun = Gun }
if(typeof common !== "undefined"){ common.exports = Gun }
module.exports = Gun;
Gun.log.once("0.8", "0.8 WARNING! Breaking changes, test that your app works before upgrading! The adapter interface has been upgraded (non-default storage and transport layers probably won't work). Also, `.path()` and `.not()` are outside core and now in 'lib/'.");

View File

@ -3176,7 +3176,6 @@ describe('Gun', function(){
setTimeout(function(){
var gun2 = Gun();
//console.log(require('fs').readFileSync('./radata/!').toString());
//console.debug.i=1;console.log("-----------------");
gun2.get('stef').get('address').val(function(data){ // Object {_: Object, country: "Netherlands", zip: "1766KP"} "adress"
//console.log("******", data);
done.a = true;
@ -3346,7 +3345,6 @@ describe('Gun', function(){
it('multiple times', function(done){
var gun = Gun();
var app = gun.get('mult/times');
app.get('alias').get('mark').set(gun.get('ASDF').put({
@ -3389,7 +3387,8 @@ describe('Gun', function(){
}, s)});
var app = gun.get(s.soul);
//console.debug.i=1;console.log("===================");
app.get('alias').get('mark').map().val(function(alias){
//console.log("***", alias);
done.alias = alias;

View File

@ -1,7 +1,7 @@
var Radix = require('../../lib/radix');
var Radisk = require('../../lib/radisk');
var Store = require('../../lib/store');
//var Store = require('../../lib/rs3');
var Store = require('../../lib/rs3');
var Gun = require('../../gun');
var fs = require('fs');
@ -21,8 +21,8 @@ var diff;
var last = start;
var t = Gun.time.is;
var at = c;
;(function(){
;(function(){return;
start = Gun.time.is();
gun.get('j59an5jj2LUW8IJXl0u3').get('foo').on(function(data){
/*Radix.map(data, function(val, key){
@ -39,8 +39,6 @@ var diff;
});
}());
return;
var toc, alldone = function(){
acked++;
if(acked < TOTAL){ return }

16
test/radix.js Normal file
View File

@ -0,0 +1,16 @@
// Mocha spec for the Radix in-memory prefix tree (lib/radix).
var expect = global.expect = require("./expect");
var Radix = require('../lib/radix');
// NOTE(review): `_` (char 29) is never used below; the assertion uses the
// literal '\u001e' (char 30) — confirm which terminator radix really emits.
var _ = String.fromCharCode(29);
describe('Radix', function(){
	it('read', function(){
		var tree = Radix();
		// Seed three keys, two of them sharing the leading 'a'.
		tree('asdf.pub', 'yum');
		tree('ablah', 'cool');
		tree('node/circle.bob', 'awesome');
		// A prefix lookup yields the subtree below that prefix...
		expect(tree('asdf.')).to.be.eql({pub: {'\u001e': 'yum'}});
		// ...while a miss yields undefined.
		expect(tree('nv/foo.bar')).to.be(undefined);
	});
});