mirror of
https://github.com/bigchaindb/bigchaindb.git
synced 2024-10-13 13:34:05 +00:00
Merge branch 'lluminita-resolve-issue-458'
This commit is contained in:
commit
150cade4db
@ -18,55 +18,91 @@ def get_conn():
|
||||
bigchaindb.config['database']['port'])
|
||||
|
||||
|
||||
def get_database_name():
    """Return the database name taken from the bigchaindb configuration."""
    db_config = bigchaindb.config['database']
    return db_config['name']
def create_database(conn, dbname):
    """Create the database ``dbname`` on the RethinkDB server.

    Args:
        conn: open RethinkDB connection.
        dbname (str): name of the database to create.

    Raises:
        exceptions.DatabaseAlreadyExists: if a database with that name
            is already present on the server.
    """
    already_exists = r.db_list().contains(dbname).run(conn)
    if already_exists:
        raise exceptions.DatabaseAlreadyExists('Database `{}` already exists'.format(dbname))

    logger.info('Create database `%s`.', dbname)
    r.db_create(dbname).run(conn)
def create_table(conn, dbname, table_name):
    """Create table ``table_name`` inside database ``dbname``.

    Args:
        conn: open RethinkDB connection.
        dbname (str): database that will hold the table.
        table_name (str): name of the table to create.
    """
    logger.info('Create `%s` table.', table_name)
    database = r.db(dbname)
    database.table_create(table_name).run(conn)
def create_bigchain_secondary_index(conn, dbname):
    """Create the secondary indexes on the `bigchain` table.

    Builds three indexes and blocks until RethinkDB reports them ready:
    `block_timestamp`, `transaction_id` (multi) and `payload_uuid` (multi).

    Args:
        conn: open RethinkDB connection.
        dbname (str): database containing the `bigchain` table.
    """
    logger.info('Create `bigchain` secondary index.')
    table = r.db(dbname).table('bigchain')

    # order blocks by timestamp
    table.index_create('block_timestamp',
                       r.row['block']['timestamp']).run(conn)
    # look up a block by one of the transaction ids it contains
    table.index_create('transaction_id',
                       r.row['block']['transactions']['id'],
                       multi=True).run(conn)
    # look up payload data by UUID
    table.index_create('payload_uuid',
                       r.row['block']['transactions']['transaction']['data']['uuid'],
                       multi=True).run(conn)

    # block until RethinkDB has finished building the indexes
    table.index_wait().run(conn)
def create_backlog_secondary_index(conn, dbname):
    """Create the secondary indexes on the `backlog` table.

    Builds `transaction_timestamp` and the compound
    `assignee__transaction_timestamp` index, then waits for both.

    Args:
        conn: open RethinkDB connection.
        dbname (str): database containing the `backlog` table.
    """
    logger.info('Create `backlog` secondary index.')
    table = r.db(dbname).table('backlog')

    # order transactions by timestamp
    table.index_create('transaction_timestamp',
                       r.row['transaction']['timestamp']).run(conn)
    # compound index so transactions can be read per assignee, in time order
    table.index_create('assignee__transaction_timestamp',
                       [r.row['assignee'],
                        r.row['transaction']['timestamp']]).run(conn)

    # block until RethinkDB has finished building the indexes
    table.index_wait().run(conn)
def create_votes_secondary_index(conn, dbname):
    """Create the secondary index on the `votes` table.

    Builds the compound `block_and_voter` index (block id + voter public
    key) and waits until it is ready.

    Args:
        conn: open RethinkDB connection.
        dbname (str): database containing the `votes` table.
    """
    logger.info('Create `votes` secondary index.')
    table = r.db(dbname).table('votes')

    # compound index to order votes by block id and voting node
    table.index_create('block_and_voter',
                       [r.row['vote']['voting_for_block'],
                        r.row['node_pubkey']]).run(conn)

    # block until RethinkDB has finished building the index
    table.index_wait().run(conn)
def init():
    """Initialize the BigchainDB backend.

    Creates the database, the `bigchain`, `backlog` and `votes` tables
    with their secondary indexes, and writes the genesis block.

    Raises:
        exceptions.DatabaseAlreadyExists: if the database already exists
            (raised by ``create_database``).
    """
    # NOTE(review): the scraped diff had merged the pre- and post-refactor
    # bodies here (duplicate dbname assignment, a second existence check and
    # inline db/table/index creation after create_database had already run,
    # which would raise DatabaseAlreadyExists). This is the refactored
    # version that delegates to the helper functions.

    # Try to access the keypair, throws an exception if it does not exist
    b = bigchaindb.Bigchain()

    conn = get_conn()
    dbname = get_database_name()
    create_database(conn, dbname)

    table_names = ['bigchain', 'backlog', 'votes']
    for table_name in table_names:
        create_table(conn, dbname, table_name)
    create_bigchain_secondary_index(conn, dbname)
    create_backlog_secondary_index(conn, dbname)
    create_votes_secondary_index(conn, dbname)

    logger.info('Create genesis block.')
    b.create_genesis_block()
    logger.info('Done, have fun!')
@ -37,6 +37,113 @@ def test_init_creates_db_tables_and_indexes():
|
||||
'assignee__transaction_timestamp').run(conn) is True
|
||||
|
||||
|
||||
def test_create_database():
    """The helper creates the database and it shows up in db_list."""
    connection = utils.get_conn()
    db_name = utils.get_database_name()

    # The db is set up by fixtures, so drop it and let the helper
    # recreate it from scratch.
    r.db_drop(db_name).run(connection)
    utils.create_database(connection, db_name)

    assert r.db_list().contains(db_name).run(connection) is True
def test_create_bigchain_table():
    """Only the `bigchain` table exists after creating just that table."""
    connection = utils.get_conn()
    db_name = utils.get_database_name()

    # The db is set up by fixtures, so rebuild it with a single table.
    r.db_drop(db_name).run(connection)
    utils.create_database(connection, db_name)
    utils.create_table(connection, db_name, 'bigchain')

    tables = r.db(db_name).table_list()
    assert tables.contains('bigchain').run(connection) is True
    assert tables.contains('backlog').run(connection) is False
    assert tables.contains('votes').run(connection) is False
def test_create_bigchain_secondary_index():
    """All three `bigchain` secondary indexes are created by the helper."""
    connection = utils.get_conn()
    db_name = utils.get_database_name()

    # The db is set up by fixtures, so rebuild it with a single table.
    r.db_drop(db_name).run(connection)
    utils.create_database(connection, db_name)
    utils.create_table(connection, db_name, 'bigchain')
    utils.create_bigchain_secondary_index(connection, db_name)

    indexes = r.db(db_name).table('bigchain').index_list()
    assert indexes.contains('block_timestamp').run(connection) is True
    assert indexes.contains('transaction_id').run(connection) is True
    assert indexes.contains('payload_uuid').run(connection) is True
def test_create_backlog_table():
    """Only the `backlog` table exists after creating just that table."""
    connection = utils.get_conn()
    db_name = utils.get_database_name()

    # The db is set up by fixtures, so rebuild it with a single table.
    r.db_drop(db_name).run(connection)
    utils.create_database(connection, db_name)
    utils.create_table(connection, db_name, 'backlog')

    tables = r.db(db_name).table_list()
    assert tables.contains('backlog').run(connection) is True
    assert tables.contains('bigchain').run(connection) is False
    assert tables.contains('votes').run(connection) is False
def test_create_backlog_secondary_index():
    """Both `backlog` secondary indexes are created by the helper."""
    connection = utils.get_conn()
    db_name = utils.get_database_name()

    # The db is set up by fixtures, so rebuild it with a single table.
    r.db_drop(db_name).run(connection)
    utils.create_database(connection, db_name)
    utils.create_table(connection, db_name, 'backlog')
    utils.create_backlog_secondary_index(connection, db_name)

    indexes = r.db(db_name).table('backlog').index_list()
    assert indexes.contains('transaction_timestamp').run(connection) is True
    assert indexes.contains('assignee__transaction_timestamp').run(connection) is True
def test_create_votes_table():
    """Only the `votes` table exists after creating just that table."""
    connection = utils.get_conn()
    db_name = utils.get_database_name()

    # The db is set up by fixtures, so rebuild it with a single table.
    r.db_drop(db_name).run(connection)
    utils.create_database(connection, db_name)
    utils.create_table(connection, db_name, 'votes')

    tables = r.db(db_name).table_list()
    assert tables.contains('votes').run(connection) is True
    assert tables.contains('bigchain').run(connection) is False
    assert tables.contains('backlog').run(connection) is False
def test_create_votes_secondary_index():
    """The `block_and_voter` index on `votes` is created by the helper."""
    connection = utils.get_conn()
    db_name = utils.get_database_name()

    # The db is set up by fixtures, so rebuild it with a single table.
    r.db_drop(db_name).run(connection)
    utils.create_database(connection, db_name)
    utils.create_table(connection, db_name, 'votes')
    utils.create_votes_secondary_index(connection, db_name)

    indexes = r.db(db_name).table('votes').index_list()
    assert indexes.contains('block_and_voter').run(connection) is True
def test_init_fails_if_db_exists():
|
||||
conn = utils.get_conn()
|
||||
dbname = bigchaindb.config['database']['name']
|
||||
|
Loading…
x
Reference in New Issue
Block a user