From 9474753a8ef64a105766566678b9839157ea23b5 Mon Sep 17 00:00:00 2001 From: Mark Nadal Date: Mon, 9 Jul 2018 18:21:15 -0700 Subject: [PATCH] make Radisk available in browser for @fuchidahiro --- examples/style.css | 8 +- lib/load.js | 4 +- lib/open.js | 2 +- lib/radisk.js | 644 ++++++++++++++++++++++--------------------- lib/radix.js | 20 +- lib/rfs.js | 61 ++++ lib/store.js | 99 +------ lib/time.js | 2 +- lib/upload.js | 6 +- test/panic/radisk.js | 22 +- test/radisk.html | 34 +++ 11 files changed, 473 insertions(+), 429 deletions(-) create mode 100644 lib/rfs.js create mode 100644 test/radisk.html diff --git a/examples/style.css b/examples/style.css index 8ce1791e..6f7896aa 100644 --- a/examples/style.css +++ b/examples/style.css @@ -44,6 +44,11 @@ ul, li { } .model, .none { display: none } +.hide { + opacity: 0; + visibility: hidden; + transition: all 2s; +} .page { width: 100%; @@ -81,8 +86,7 @@ ul, li { padding: 3%; } .gully { - margin-top: 2em; - margin-bottom: 2em; + margin-bottom: 1%; } .sit { margin-bottom: 0; } diff --git a/lib/load.js b/lib/load.js index f34f9ca5..b84b2d0c 100644 --- a/lib/load.js +++ b/lib/load.js @@ -1,9 +1,9 @@ if(typeof window !== "undefined"){ var Gun = window.Gun; } else { - var Gun = require('gun/gun'); + var Gun = require('../gun'); } -Gun.chain.open || require('gun/lib/open'); +Gun.chain.open || require('./open'); Gun.chain.load = function(cb, opt, at){ (opt = opt || {}).off = !0; diff --git a/lib/open.js b/lib/open.js index 7191bff5..2b4e5642 100644 --- a/lib/open.js +++ b/lib/open.js @@ -1,7 +1,7 @@ if(typeof window !== "undefined"){ var Gun = window.Gun; } else { - var Gun = require('gun/gun'); + var Gun = require('../gun'); } Gun.chain.open = function(cb, opt, at){ diff --git a/lib/radisk.js b/lib/radisk.js index d2509975..8a5ff699 100644 --- a/lib/radisk.js +++ b/lib/radisk.js @@ -1,342 +1,350 @@ -var fs = require('fs'); -var Gun = require('../gun'); -var Radix = require('./radix'); +;(function(){ -function Radisk(opt){ + function Radisk(opt){ - opt = opt || {}; - opt.file = String(opt.file || 'radata'); - opt.until = opt.until || opt.wait || 1000; // default for HDDs - opt.batch = opt.batch || 10 * 1000; - opt.chunk = opt.chunk || (1024 * 1024 * 10); // 10MB - opt.code = opt.code || {}; - opt.code.from = opt.code.from || '!'; + opt = opt || {}; + opt.file = String(opt.file || 'radata'); + opt.until = opt.until || opt.wait || 1000; // default for HDDs + opt.batch = opt.batch || 10 * 1000; + opt.chunk = opt.chunk || (1024 * 1024 * 10); // 10MB + opt.code = opt.code || {}; + opt.code.from = opt.code.from || '!'; - function ename(t){ return encodeURIComponent(t).replace(/\*/g, '%2A') } + function ename(t){ return encodeURIComponent(t).replace(/\*/g, '%2A') } - if(!opt.store){ - return Gun.log("ERROR: Radisk needs `opt.store` interface with `{get: fn, put: fn, list: fn}`!"); - } - if(!opt.store.put){ - return Gun.log("ERROR: Radisk needs `store.put` interface with `(file, data, cb)`!"); - } - if(!opt.store.get){ - return Gun.log("ERROR: Radisk needs `store.get` interface with `(file, cb)`!"); - } - if(!opt.store.list){ - return Gun.log("ERROR: Radisk needs a streaming `store.list` interface with `(cb)`!"); - } + if(!opt.store){ + return Gun.log("ERROR: Radisk needs `opt.store` interface with `{get: fn, put: fn, list: fn}`!"); + } + if(!opt.store.put){ + return Gun.log("ERROR: Radisk needs `store.put` interface with `(file, data, cb)`!"); + } + if(!opt.store.get){ + return Gun.log("ERROR: Radisk needs `store.get` interface with `(file, cb)`!"); + } + 
if(!opt.store.list){ + return Gun.log("ERROR: Radisk needs a streaming `store.list` interface with `(cb)`!"); + } - /* - Any and all storage adapters should... - 1. Because writing to disk takes time, we should batch data to disk. This improves performance, and reduces potential disk corruption. - 2. If a batch exceeds a certain number of writes, we should immediately write to disk when physically possible. This caps total performance, but reduces potential loss. - */ - var r = function(key, val, cb){ - key = ''+key; - if(val instanceof Function){ - cb = val; - val = r.batch(key); - if(u !== val){ - return cb(u, val); + /* + Any and all storage adapters should... + 1. Because writing to disk takes time, we should batch data to disk. This improves performance, and reduces potential disk corruption. + 2. If a batch exceeds a certain number of writes, we should immediately write to disk when physically possible. This caps total performance, but reduces potential loss. + */ + var r = function(key, val, cb){ + key = ''+key; + if(val instanceof Function){ + cb = val; + val = r.batch(key); + if(u !== val){ + return cb(u, val); + } + if(r.thrash.at){ + val = r.thrash.at(key); + if(u !== val){ + return cb(u, val); + } + } + //console.log("READ FROM DISK"); + return r.read(key, cb); } - if(r.thrash.at){ - val = r.thrash.at(key); + r.batch(key, val); + if(cb){ r.batch.acks.push(cb) } + if(++r.batch.ed >= opt.batch){ return r.thrash() } // (2) + clearTimeout(r.batch.to); // (1) + r.batch.to = setTimeout(r.thrash, opt.until || 1); + } + + r.batch = Radix(); + r.batch.acks = []; + r.batch.ed = 0; + + r.thrash = function(){ + var thrash = r.thrash; + if(thrash.ing){ return thrash.more = true } + thrash.more = false; + thrash.ing = true; + var batch = thrash.at = r.batch, i = 0; + clearTimeout(r.batch.to); + r.batch = null; + r.batch = Radix(); + r.batch.acks = []; + r.batch.ed = 0; + r.save(batch, function(err, ok){ + if(++i > 1){ return } + if(err){ Gun.log(err) } + Gun.obj.map(batch.acks, function(cb){ cb(err, ok) }); + thrash.at = null; + thrash.ing = false; + if(thrash.more){ thrash() } + }); + } + + /* + 1. Find the first radix item in memory. + 2. Use that as the starting index in the directory of files. + 3. Find the first file that is lexically larger than it, + 4. Read the previous file to that into memory + 5. Scan through the in memory radix for all values lexically less than the limit. + 6. Merge and write all of those to the in-memory file and back to disk. + 7. If file to large, split. More details needed here. + */ + r.save = function(rad, cb){ + var s = function Span(){}; + s.find = function(tree, key){ + if(key < s.start){ return } + s.start = key; + opt.store.list(s.lex); + return true; + } + s.lex = function(file){ + file = (u === file)? 
u : decodeURIComponent(file); + if(!file || file > s.start){ + s.mix(s.file || opt.code.from, s.start, s.end = file); + return true; + } + s.file = file; + } + s.mix = function(file, start, end){ + s.start = s.end = s.file = u; + r.parse(file, function(err, disk){ + if(err){ return cb(err) } + Radix.map(rad, function(val, key){ + if(key < start){ return } + if(end && end < key){ return s.start = key } + // PLUGIN: consider adding HAM as an extra layer of protection + disk(key, val); // merge batch[key] -> disk[key] + }); + r.write(file, disk, s.next); + }); + } + s.next = function(err, ok){ + if(s.err = err){ return cb(err) } + if(s.start){ return Radix.map(rad, s.find) } + cb(err, ok); + } + Radix.map(rad, s.find); + } + + /* + Any storage engine at some point will have to do a read in order to write. + This is true of even systems that use an append only log, if they support updates. + Therefore it is unavoidable that a read will have to happen, + the question is just how long you delay it. + */ + r.write = function(file, rad, cb){ + var f = function Fractal(){}; + f.text = ''; + f.count = 0; + f.file = file; + f.each = function(val, key, k, pre){ + f.count++; + var enc = Radisk.encode(pre.length) +'#'+ Radisk.encode(k) + (u === val? '' : '='+ Radisk.encode(val)) +'\n'; + if(opt.chunk < f.text.length + enc.length){ + f.text = ''; + f.limit = Math.ceil(f.count/2); + f.count = 0; + f.sub = Radix(); + Radix.map(rad, f.slice); + return true; + } + f.text += enc; + } + f.write = function(){ opt.store.put(ename(file), f.text, cb) } + f.slice = function(val, key){ + if(key < f.file){ return } + if(f.limit < (++f.count)){ + var name = f.file; + f.file = key; + f.count = 0; + r.write(name, f.sub, f.next); + return true; + } + f.sub(key, val); + } + f.next = function(err){ + if(err){ return cb(err) } + f.sub = Radix(); + if(!Radix.map(rad, f.slice)){ + r.write(f.file, f.sub, cb); + } + } + if(!Radix.map(rad, f.each, true)){ f.write() } + } + + r.read = function(key, cb){ + // TODO: BUG!!! If a node spans multiple file chunks, it won't return all! + // TODO: BUG!!! If a node spans multiple file chunks, it won't return all! + // TODO: BUG!!! If a node spans multiple file chunks, it won't return all! + // TODO: BUG!!! If a node spans multiple file chunks, it won't return all! + // TODO: BUG!!! If a node spans multiple file chunks, it won't return all! + // TODO: BUG!!! If a node spans multiple file chunks, it won't return all! + // TODO: BUG!!! If a node spans multiple file chunks, it won't return all! + if(RAD){ // cache + var val = RAD(key); if(u !== val){ return cb(u, val); } } - //console.log("READ FROM DISK"); - return r.read(key, cb); - } - r.batch(key, val); - if(cb){ r.batch.acks.push(cb) } - if(++r.batch.ed >= opt.batch){ return r.thrash() } // (2) - clearTimeout(r.batch.to); // (1) - r.batch.to = setTimeout(r.thrash, opt.until || 1); - } - - r.batch = Radix(); - r.batch.acks = []; - r.batch.ed = 0; - - r.thrash = function(){ - var thrash = r.thrash; - if(thrash.ing){ return thrash.more = true } - thrash.more = false; - thrash.ing = true; - var batch = thrash.at = r.batch, i = 0; - clearTimeout(r.batch.to); - r.batch = null; - r.batch = Radix(); - r.batch.acks = []; - r.batch.ed = 0; - r.save(batch, function(err, ok){ - if(++i > 1){ return } - if(err){ Gun.log(err) } - Gun.obj.map(batch.acks, function(cb){ cb(err, ok) }); - thrash.at = null; - thrash.ing = false; - if(thrash.more){ thrash() } - }); - } - - /* - 1. Find the first radix item in memory. - 2. 
Use that as the starting index in the directory of files. - 3. Find the first file that is lexically larger than it, - 4. Read the previous file to that into memory - 5. Scan through the in memory radix for all values lexically less than the limit. - 6. Merge and write all of those to the in-memory file and back to disk. - 7. If file to large, split. More details needed here. - */ - r.save = function(rad, cb){ - var s = function Span(){}; - s.find = function(tree, key){ - if(key < s.start){ return } - s.start = key; - opt.store.list(s.lex); - return true; - } - s.lex = function(file){ - file = (u === file)? u : decodeURIComponent(file); - if(!file || file > s.start){ - s.mix(s.file || opt.code.from, s.start, s.end = file); - return true; - } - s.file = file; - } - s.mix = function(file, start, end){ - s.start = s.end = s.file = u; - r.parse(file, function(err, disk){ - if(err){ return cb(err) } - Radix.map(rad, function(val, key){ - if(key < start){ return } - if(end && end < key){ return s.start = key } - // PLUGIN: consider adding HAM as an extra layer of protection - disk(key, val); // merge batch[key] -> disk[key] - }); - r.write(file, disk, s.next); - }); - } - s.next = function(err, ok){ - if(s.err = err){ return cb(err) } - if(s.start){ return Radix.map(rad, s.find) } - cb(err, ok); - } - Radix.map(rad, s.find); - } - - /* - Any storage engine at some point will have to do a read in order to write. - This is true of even systems that use an append only log, if they support updates. - Therefore it is unavoidable that a read will have to happen, - the question is just how long you delay it. - */ - r.write = function(file, rad, cb){ - var f = function Fractal(){}; - f.text = ''; - f.count = 0; - f.file = file; - f.each = function(val, key, k, pre){ - f.count++; - var enc = Radisk.encode(pre.length) +'#'+ Radisk.encode(k) + (u === val? '' : '='+ Radisk.encode(val)) +'\n'; - if(opt.chunk < f.text.length + enc.length){ - f.text = ''; - f.limit = Math.ceil(f.count/2); - f.count = 0; - f.sub = Radix(); - Radix.map(rad, f.slice); - return true; - } - f.text += enc; - } - f.write = function(){ opt.store.put(ename(file), f.text, cb) } - f.slice = function(val, key){ - if(key < f.file){ return } - if(f.limit < (++f.count)){ - var name = f.file; - f.file = key; - f.count = 0; - r.write(name, f.sub, f.next); - return true; - } - f.sub(key, val); - } - f.next = function(err){ - if(err){ return cb(err) } - f.sub = Radix(); - if(!Radix.map(rad, f.slice)){ - r.write(f.file, f.sub, cb); - } - } - if(!Radix.map(rad, f.each, true)){ f.write() } - } - - r.read = function(key, cb){ - // TODO: BUG!!! If a node spans multiple file chunks, it won't return all! - // TODO: BUG!!! If a node spans multiple file chunks, it won't return all! - // TODO: BUG!!! If a node spans multiple file chunks, it won't return all! - // TODO: BUG!!! If a node spans multiple file chunks, it won't return all! - // TODO: BUG!!! If a node spans multiple file chunks, it won't return all! - // TODO: BUG!!! If a node spans multiple file chunks, it won't return all! - // TODO: BUG!!! If a node spans multiple file chunks, it won't return all! - if(RAD){ // cache - var val = RAD(key); - if(u !== val){ - return cb(u, val); - } - } - var g = function Get(){}, tmp; - g.lex = function(file){ - file = (u === file)? u : decodeURIComponent(file); - if(!file || file > key){ - if(tmp = q[g.file]){ - tmp.push({key: key, ack: cb}); + var g = function Get(){}, tmp; + g.lex = function(file){ + file = (u === file)? 
u : decodeURIComponent(file); + if(!file || file > key){ + if(tmp = q[g.file]){ + tmp.push({key: key, ack: cb}); + return true; + } + q[g.file] = [{key: key, ack: cb}]; + r.parse(g.file, g.it); return true; } - q[g.file] = [{key: key, ack: cb}]; - r.parse(g.file, g.it); - return true; + g.file = file; } - g.file = file; + g.it = function(err, disk){ + if(g.err = err){ Gun.log(err) } + if(disk){ RAD = disk } + disk = q[g.file]; Gun.obj.del(q, g.file); + Gun.obj.map(disk, g.ack); + } + g.ack = function(as){ + if(!as.ack){ return } + as.ack(g.err, (RAD || noop)(as.key)); + } + opt.store.list(g.lex); } - g.it = function(err, disk){ - if(g.err = err){ Gun.log(err) } - if(disk){ RAD = disk } - disk = q[g.file]; Gun.obj.del(q, g.file); - Gun.obj.map(disk, g.ack); + /* + Let us start by assuming we are the only process that is + changing the directory or bucket. Not because we do not want + to be multi-process/machine, but because we want to experiment + with how much performance and scale we can get out of only one. + Then we can work on the harder problem of being multi-process. + */ + r.parse = function(file, cb){ + var p = function Parse(){}, s = String.fromCharCode(31); + p.disk = Radix(); + p.read = function(err, data){ var tmp; + if(err){ return cb(err) } + if(!data){ return cb(u, p.disk) } + var tmp = p.split(data), pre = [], i, k, v; + while(tmp){ + k = v = u; + i = tmp[1]; + tmp = p.split(tmp[2])||''; + if('#' == tmp[0]){ + k = tmp[1]; + pre = pre.slice(0,i); + if(i <= pre.length){ + pre.push(k); + } + } + tmp = p.split(tmp[2])||''; + if('\n' == tmp[0]){ continue } + if('=' == tmp[0]){ v = tmp[1] } + if(u !== k && u !== v){ p.disk(pre.join(''), v) } + tmp = p.split(tmp[2]); + } + cb(u, p.disk); + }; + p.split = function(t){ + if(!t){ return } + var l = [], o = {}, i = -1, a = '', b, c; + while(c = t[++i]){ + if(s === c){ break } + a += c; + } + if(!c){ return } + l[0] = a; + l[1] = b = Radisk.decode(t.slice(i), o); + l[2] = t.slice(i + o.i); + return l; + } + opt.store.get(ename(file), p.read); } - g.ack = function(as){ - if(!as.ack){ return } - as.ack(g.err, (RAD || noop)(as.key)); - } - opt.store.list(g.lex); + + var q = {}, noop = function(){}, RAD, u; + return r; } - /* - Let us start by assuming we are the only process that is - changing the directory or bucket. Not because we do not want - to be multi-process/machine, but because we want to experiment - with how much performance and scale we can get out of only one. - Then we can work on the harder problem of being multi-process. 
- */ - r.parse = function(file, cb){ - var p = function Parse(){}, s = String.fromCharCode(31); - p.disk = Radix(); - p.read = function(err, data){ var tmp; - if(err){ return cb(err) } - if(!data){ return cb(u, p.disk) } - var tmp = p.split(data), pre = [], i, k, v; - while(tmp){ - k = v = u; - i = tmp[1]; - tmp = p.split(tmp[2])||''; - if('#' == tmp[0]){ - k = tmp[1]; - pre = pre.slice(0,i); - if(i <= pre.length){ - pre.push(k); + + + ;(function(){ + s = String.fromCharCode(31); + Radisk.encode = function(d, o){ + var t = s, tmp; + if(typeof d == 'string'){ + var i = -1, c; + while(c = d[++i]){ + if(s === c){ + t += s; } } - tmp = p.split(tmp[2])||''; - if('\n' == tmp[0]){ continue } - if('=' == tmp[0]){ v = tmp[1] } - if(u !== k && u !== v){ p.disk(pre.join(''), v) } - tmp = p.split(tmp[2]); - } - cb(u, p.disk); - }; - p.split = function(t){ - if(!t){ return } - var l = [], o = {}, i = -1, a = '', b, c; - while(c = t[++i]){ - if(s === c){ break } - a += c; - } - if(!c){ return } - l[0] = a; - l[1] = b = Radisk.decode(t.slice(i), o); - l[2] = t.slice(i + o.i); - return l; - } - opt.store.get(ename(file), p.read); - } - - var q = {}, noop = function(){}, RAD, u; - return r; -} - - -;(function(){ - s = String.fromCharCode(31); - Radisk.encode = function(d, o){ - var t = s, tmp; - if(typeof d == 'string'){ - var i = -1, c; - while(c = d[++i]){ - if(s === c){ - t += s; - } - } - return t + '"' + d + s; - } else - if(d && d['#'] && (tmp = Gun.val.link.is(d))){ - return t + '#' + tmp + t; - } else - if(Gun.num.is(d)){ - return t + '+' + (d||0) + t; - } else - if(null === d){ - return t + ' ' + t; - } else - if(true === d){ - return t + '+' + t; - } else - if(false === d){ - return t + '-' + t; - }// else - //if(binary){} - } - Radisk.decode = function(t, o){ - var d = '', i = -1, n = 0, c, p; - if(s !== t[0]){ return } - while(c = t[++i]){ - if(p){ - if(s === c){ - if(--n <= 0){ - break; - } - } - d += c; + return t + '"' + d + s; } else - if(s === c){ - ++n; - } else { - p = c || true; + if(d && d['#'] && (tmp = Gun.val.link.is(d))){ + return t + '#' + tmp + t; + } else + if(Gun.num.is(d)){ + return t + '+' + (d||0) + t; + } else + if(null === d){ + return t + ' ' + t; + } else + if(true === d){ + return t + '+' + t; + } else + if(false === d){ + return t + '-' + t; + }// else + //if(binary){} + } + Radisk.decode = function(t, o){ + var d = '', i = -1, n = 0, c, p; + if(s !== t[0]){ return } + while(c = t[++i]){ + if(p){ + if(s === c){ + if(--n <= 0){ + break; + } + } + d += c; + } else + if(s === c){ + ++n; + } else { + p = c || true; + } + } + if(o){ o.i = i+1 } + if('"' === p){ + return d; + } else + if('#' === p){ + return Gun.val.link.ify(d); + } else + if('+' === p){ + if(0 === d.length){ + return true; + } + return parseFloat(d); + } else + if(' ' === p){ + return null; + } else + if('-' === p){ + return false; } } - if(o){ o.i = i+1 } - if('"' === p){ - return d; - } else - if('#' === p){ - return Gun.val.link.ify(d); - } else - if('+' === p){ - if(0 === d.length){ - return true; - } - return parseFloat(d); - } else - if(' ' === p){ - return null; - } else - if('-' === p){ - return false; - } + }()); + + if(typeof window !== "undefined"){ + var Gun = window.Gun; + var Radix = window.Radix; + window.Radisk = Radisk; + } else { + var Gun = require('../gun'); + var Radix = require('./radix'); + try{ module.exports = Radisk }catch(e){} } -}()); -Radisk.Radix = Radix; + Radisk.Radix = Radix; -module.exports = Radisk; \ No newline at end of file +}()); \ No newline at end of file diff --git 
a/lib/radix.js b/lib/radix.js index 1fef14cb..ae8731db 100644 --- a/lib/radix.js +++ b/lib/radix.js @@ -1,8 +1,4 @@ -var Gun = require('../gun'); ;(function(){ - var map = Gun.obj.map, no = {}, u; - - var $ = String.fromCharCode(30), _ = String.fromCharCode(29); function Radix(){ var radix = function(key, val, t){ @@ -46,6 +42,7 @@ var Gun = require('../gun'); } return radix; }; + Radix.map = function map(radix, cb, opt, pre){ pre = pre || []; var t = radix[_] || radix, keys = Object.keys(t).sort(), i = 0, l = keys.length; for(;i < l; i++){ var key = keys[i], tree = t[key], tmp; @@ -64,7 +61,18 @@ var Gun = require('../gun'); } } }; + Object.keys = Object.keys || function(o){ return map(o, function(v,k,t){t(k)}) } + + if(typeof window !== "undefined"){ + var Gun = window.Gun; + window.Radix = Radix; + } else { + var Gun = require('../gun'); + try{ module.exports = Radix }catch(e){} + } - module.exports = Radix; -}()); + var map = Gun.obj.map, no = {}, u; + var $ = String.fromCharCode(30), _ = String.fromCharCode(29); + +}()); \ No newline at end of file diff --git a/lib/rfs.js b/lib/rfs.js new file mode 100644 index 00000000..b92293a0 --- /dev/null +++ b/lib/rfs.js @@ -0,0 +1,61 @@ +function Store(opt){ + opt = opt || {}; + opt.file = String(opt.file || 'radata'); + + var Gun = require('../gun'), fs = require('fs'), u; + var store = function Store(){}; + store.put = function(file, data, cb){ + var random = Math.random().toString(36).slice(-3) + fs.writeFile(opt.file+'-'+random+'.tmp', data, function(err, ok){ + if(err){ return cb(err) } + move(opt.file+'-'+random+'.tmp', opt.file+'/'+file, cb); + }); + }; + store.get = function(file, cb){ + fs.readFile(opt.file+'/'+file, function(err, data){ + if(err){ + if('ENOENT' === (err.code||'').toUpperCase()){ + return cb(null); + } + Gun.log("ERROR:", err) + } + if(data){ data = data.toString() } + cb(err, data); + }); + }; + store.list = function(cb, match){ + fs.readdir(opt.file, function(err, dir){ + Gun.obj.map(dir, cb) || cb(); // Stream interface requires a final call to know when to be done. + }); + }; + if(!fs.existsSync(opt.file)){ fs.mkdirSync(opt.file) } + //store.list(function(){ return true }); + + function move(oldPath, newPath, cb) { + fs.rename(oldPath, newPath, function (err) { + if (err) { + if (err.code === 'EXDEV') { + var readStream = fs.createReadStream(oldPath); + var writeStream = fs.createWriteStream(newPath); + + readStream.on('error', cb); + writeStream.on('error', cb); + + readStream.on('close', function () { + fs.unlink(oldPath, cb); + }); + + readStream.pipe(writeStream); + } else { + cb(err); + } + } else { + cb(); + } + }); + }; + + return store; +} + +module.exports = Store; \ No newline at end of file diff --git a/lib/store.js b/lib/store.js index bdf031e2..5445ca14 100644 --- a/lib/store.js +++ b/lib/store.js @@ -1,17 +1,21 @@ -var Gun = require('../gun'); -var Radisk = require('./radisk'); -var fs = require('fs'); -var Radix = Radisk.Radix; -var u; +if(typeof window === "undefined"){ + var Gun = require('../gun'); +} Gun.on('opt', function(ctx){ this.to.next(ctx); - var opt = ctx.opt; + var opt = ctx.opt, u; + if(typeof window !== "undefined"){ + opt.window = window; + } if(ctx.once){ return } - if(false !== opt.localStorage && !process.env.AWS_S3_BUCKET){ return } // TODO: Remove this after migration. + if(false !== opt.localStorage && !(!opt.window && process.env.AWS_S3_BUCKET)){ return } // TODO: Remove this after migration. 
if(false === opt.radisk){ return } console.log("BUG WARNING: Radix Storage Engine (RAD) has a known rare edge case, if data gets split between file chunks, a GET may only return the first chunk!!!"); - opt.store = opt.store || Store(opt); + var Radisk = (opt.window && opt.window.Radisk) || require('./radisk'); + var Radix = Radisk.Radix; + + opt.store = opt.store || (!opt.window && require('./rfs')(opt)); var rad = Radisk(opt); ctx.on('put', function(at){ @@ -52,81 +56,4 @@ Gun.on('opt', function(ctx){ } }); -}); - -function Store(opt){ - opt = opt || {}; - opt.file = String(opt.file || 'radata'); - - var store = function Store(){}; - store.put = function(file, data, cb){ - var random = Math.random().toString(36).slice(-3) - fs.writeFile(opt.file+'-'+random+'.tmp', data, function(err, ok){ - if(err){ return cb(err) } - move(opt.file+'-'+random+'.tmp', opt.file+'/'+file, cb); - }); - }; - store.get = function(file, cb){ - fs.readFile(opt.file+'/'+file, function(err, data){ - if(err){ - if('ENOENT' === (err.code||'').toUpperCase()){ - return cb(null); - } - Gun.log("ERROR:", err) - } - if(data){ data = data.toString() } - cb(err, data); - }); - }; - store.list = function(cb, match){ - fs.readdir(opt.file, function(err, dir){ - Gun.obj.map(dir, cb) || cb(); // Stream interface requires a final call to know when to be done. - }); - }; - if(!fs.existsSync(opt.file)){ fs.mkdirSync(opt.file) } - //store.list(function(){ return true }); - return store; -} - -function move(oldPath, newPath, cb) { - fs.rename(oldPath, newPath, function (err) { - if (err) { - if (err.code === 'EXDEV') { - var readStream = fs.createReadStream(oldPath); - var writeStream = fs.createWriteStream(newPath); - - readStream.on('error', cb); - writeStream.on('error', cb); - - readStream.on('close', function () { - fs.unlink(oldPath, cb); - }); - - readStream.pipe(writeStream); - } else { - cb(err); - } - } else { - cb(); - } - }); -}; - -module.exports = Store; - - -;(function(){ - return; - process.env.AWS_S3_BUCKET = 'test-s3'; - process.env.AWS_ACCESS_KEY_ID = 'asdf'; - process.env.AWS_SECRET_ACCESS_KEY = 'fdsa'; - process.env.fakes3 = 'http://localhost:4567'; - process.env.AWS_S3_THROTTLE = 0; - - return; - global.Gun = require('../gun'); - //require('./rs3'); - - - require('../test/abc'); -}()); \ No newline at end of file +}); \ No newline at end of file diff --git a/lib/time.js b/lib/time.js index 58983601..75b431f7 100644 --- a/lib/time.js +++ b/lib/time.js @@ -1,5 +1,5 @@ if(typeof window === "undefined"){ //Not in the browser, Include from node - var Gun = require('gun/gun'); + var Gun = require('../gun'); } ;(function(){ diff --git a/lib/upload.js b/lib/upload.js index 87985fd2..6cdadd21 100644 --- a/lib/upload.js +++ b/lib/upload.js @@ -27,7 +27,11 @@ var file = (((e.event || e).target || e).result || e), img = new Image(); img.src = file; img.onload = function(){ - if(!h && img.width > w){ h = img.height * (w / img.width) } + if(img.width < w && img.height < (h||Infinity)){ + e.base64 = file; + return cb(e || file); + } + if(!h){ h = img.height * (w / img.width) } var canvas = document.createElement('canvas'), ctx = canvas.getContext('2d'); canvas.width = w; canvas.height = h; diff --git a/test/panic/radisk.js b/test/panic/radisk.js index c0e7fd1f..c48149e4 100644 --- a/test/panic/radisk.js +++ b/test/panic/radisk.js @@ -3,10 +3,12 @@ var config = { port: 8080, servers: 2, browsers: 2, - each: 2000, + each: 100, burst: 50, wait: 1, dir: __dirname, + chunk: 1024 * 10, + notrad: false, route: { '/': __dirname + 
'/index.html', '/gun.js': __dirname + '/../../gun.js', @@ -50,25 +52,19 @@ describe("Make sure the Radix Storage Engine (RSE) works.", function(){ it("GUN started!", function(){ return server.run(function(test){ var env = test.props; - console.log("????", process.argv); test.async(); if(require('fs').existsSync('radata')){ console.log("Please delete previous data first!"); explode; return; } - /*setInterval(function(){ - var mem = process.memoryUsage(); - var u = Math.round(mem.heapUsed / 1024 / 1024 * 100) / 100; - console.log(u, 'MB of', Math.round(mem.heapTotal / 1024 / 1024 * 100) / 100); - }, 1000);*/ var port = env.config.port + env.i; var server = require('http').createServer(function(req, res){ res.end("I am "+ env.i +"!"); }); var Gun = require('gun'); //require('gun/lib/store'); - var gun = Gun({web: server, localStorage: false, until: 1, memory: 50, chunk: 1024 * 100}); + var gun = Gun({web: server, localStorage: env.config.notrad, until: 1, memory: 50, chunk: env.config.chunk, file: 'radata'}); server.listen(port, function(){ test.done(); }); @@ -86,7 +82,7 @@ describe("Make sure the Radix Storage Engine (RSE) works.", function(){ console.log("I AM ALICE"); localStorage.clear(); var env = test.props; - var gun = Gun({peers: 'http://'+ env.config.IP + ':' + (env.config.port + 1) + '/gun', localStorage: false}); + var gun = Gun({peers: 'http://'+ env.config.IP + ':' + (env.config.port + 1) + '/gun', localStorage: env.config.notrad}); window.gun = gun; var n = Gun.time.is(), i = 0, c = 0, b = env.config.burst, l = env.config.each; @@ -154,7 +150,7 @@ describe("Make sure the Radix Storage Engine (RSE) works.", function(){ }); var Gun = require('gun'); //require('gun/lib/store'); - var gun = Gun({web: server, localStorage: false, until: 1, memory: 50, chunk: 1024 * 100}); + var gun = Gun({web: server, localStorage: env.config.notrad, until: 1, memory: 50, chunk: env.config.notrad, file: 'radata'}); server.listen(port, function(){ test.done(); }); @@ -167,11 +163,12 @@ describe("Make sure the Radix Storage Engine (RSE) works.", function(){ console.log("I AM BOB"); localStorage.clear(); var env = test.props; - var gun = Gun({peers: 'http://'+ env.config.IP + ':' + (env.config.port + 2) + '/gun', localStorage: false}); + var gun = Gun({peers: 'http://'+ env.config.IP + ':' + (env.config.port + 2) + '/gun', localStorage: env.config.notrad}); window.gun = gun; var n = Gun.time.is(), i = 0, c = 0, b = env.config.burst, l = env.config.each; var raw = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + window.FOO = []; function check(i){ if(i > l){ @@ 
-183,7 +180,8 @@ describe("Make sure the Radix Storage Engine (RSE) works.", function(){ if((raw+i) !== data.hello){ return test.fail('wrong ' + i) } if(d){ return } d = true; //!(c % b) && - console.log(c+'/'+l);//, '@'+Math.floor(b/((-n + (n = Gun.time.is()))/1000))+'/sec')); + window.FOO.push(i); + console.log(c+'/'+l, 'yeah?', i, Gun.node.soul(data));//, '@'+Math.floor(b/((-n + (n = Gun.time.is()))/1000))+'/sec')); window.GOT = c++; //localStorage.clear(); ref.off(); diff --git a/test/radisk.html b/test/radisk.html new file mode 100644 index 00000000..0083af92 --- /dev/null +++ b/test/radisk.html @@ -0,0 +1,34 @@ +

Radisk

+<!-- ... rest of the 34-line test page markup not recoverable from this rendering ... -->
\ No newline at end of file
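For readers who want to exercise the new in-browser path, here is a minimal sketch of a test page in the spirit of test/radisk.html. It is not the page from this patch: the localStorage-backed store, the key prefix, and the short `until: 250` flush delay are illustrative assumptions. Only the `{get, put, list}` store contract, the `rad(key, val)` / `rad(key, cb)` call shapes, and the `window.Gun` / `window.Radix` / `window.Radisk` globals come from the code above.

<html>
<head><title>Radisk test (sketch)</title></head>
<body>
<script src="../gun.js"></script>
<script src="../lib/radix.js"></script>
<script src="../lib/radisk.js"></script>
<script>
  // Hypothetical localStorage-backed store satisfying the {get, put, list}
  // interface that Radisk checks for on startup.
  var store = (function(){
    var prefix = 'radata/', s = {};
    s.put = function(file, data, cb){ localStorage[prefix + file] = data; cb(null, 1) };
    s.get = function(file, cb){ cb(null, localStorage[prefix + file]) };
    s.list = function(cb){
      var keys = Object.keys(localStorage), i = 0;
      for(; i < keys.length; i++){
        if(0 !== keys[i].indexOf(prefix)){ continue }
        if(cb(keys[i].slice(prefix.length))){ return } // a truthy return stops the stream
      }
      cb(); // final call with no file tells Radisk the listing is done
    };
    return s;
  }());

  var rad = Radisk({store: store, until: 250}); // short flush delay, just for the demo

  rad('hello', 'world');             // write: batched in memory, flushed to the store after `until` ms
  rad('hello', function(err, val){   // read: answered from the batch, the cache, or the store
    console.log('read back:', err, val); // expect: read back: undefined world
  });
</script>
</body>
</html>

The final no-argument call in store.list mirrors the comment in lib/rfs.js above: the streaming list interface requires one last call so Radisk knows the directory scan is finished.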