schema fixes

This commit is contained in:
ryan 2016-12-15 16:02:26 +01:00 committed by Rodolphe Marques
parent 243c739d2b
commit dcba5421f1
4 changed files with 36 additions and 22 deletions

View File

@@ -16,7 +16,7 @@ generic backend interfaces to the implementations in this module.
"""
# Register the single dispatched modules on import.
from bigchaindb.backend.mongodb import changefeed, schema, query # noqa
from bigchaindb.backend.mongodb import schema, query # noqa no changefeed for now
# MongoDBConnection should always be accessed via
# ``bigchaindb.backend.connect()``.

View File

@@ -26,7 +26,13 @@ class MongoDBConnection(Connection):
self.port = port or bigchaindb.config['database']['port']
self.dbname = dbname or bigchaindb.config['database']['name']
self.max_tries = max_tries
self.conn = None
self.connection = None
@property
def conn(self):
if self.connection is None:
self._connect()
return self.connection
@property
def db(self):
@@ -39,7 +45,7 @@ class MongoDBConnection(Connection):
def _connect(self):
for i in range(self.max_tries):
try:
self.conn = MongoClient(self.host, self.port)
self.connection = MongoClient(self.host, self.port)
except ConnectionFailure as exc:
if i + 1 == self.max_tries:
raise

View File

@@ -16,13 +16,13 @@ register_schema = module_dispatch_registrar(backend.schema)
@register_schema(MongoDBConnection)
def create_database(conn, dbname):
if dbname in conn.database_names():
if dbname in conn.conn.database_names():
raise exceptions.DatabaseAlreadyExists('Database `{}` already exists'
.format(dbname))
logger.info('Create database `%s`.', dbname)
# TODO: read and write concerns can be declared here
conn.get_database(dbname)
conn.conn.get_database(dbname)
@register_schema(MongoDBConnection)
@@ -31,7 +31,7 @@ def create_tables(conn, dbname):
logger.info('Create `%s` table.', table_name)
# create the table
# TODO: read and write concerns can be declared here
conn[dbname].create_collection(table_name)
conn.conn[dbname].create_collection(table_name)
@register_schema(MongoDBConnection)
@@ -43,37 +43,39 @@ def create_indexes(conn, dbname):
@register_schema(MongoDBConnection)
def drop_database(conn, dbname):
conn.drop_database(dbname)
conn.conn.drop_database(dbname)
def create_bigchain_secondary_index(conn, dbname):
logger.info('Create `bigchain` secondary index.')
# to select blocks by id
conn[dbname]['bigchain'].create_index('id', name='block_id')
conn.conn[dbname]['bigchain'].create_index('id', name='block_id')
# to order blocks by timestamp
conn[dbname]['bigchain'].create_index('block.timestamp', ASCENDING,
conn.conn[dbname]['bigchain'].create_index([('block.timestamp', ASCENDING)],
name='block_timestamp')
# to query the bigchain for a transaction id, this field is unique
conn[dbname]['bigchain'].create_index('block.transactions.id',
conn.conn[dbname]['bigchain'].create_index('block.transactions.id',
name='transaction_id', unique=True)
# secondary index for payload data by UUID, this field is unique
conn[dbname]['bigchain']\
conn.conn[dbname]['bigchain']\
.create_index('block.transactions.transaction.metadata.id',
name='metadata_id', unique=True)
# secondary index for asset uuid, this field is unique
conn[dbname]['bigchain']\
conn.conn[dbname]['bigchain']\
.create_index('block.transactions.transaction.asset.id',
name='asset_id', unique=True)
# compound index on fulfillment and transactions id
conn[dbname]['bigchain']\
.create_index(['block.transactions.transaction.fulfillments.txid',
'block.transactions.transaction.fulfillments.cid'],
conn.conn[dbname]['bigchain']\
.create_index([('block.transactions.transaction.fulfillments.txid',
ASCENDING),
('block.transactions.transaction.fulfillments.cid',
ASCENDING)],
name='tx_and_fulfillment')
@@ -81,12 +83,13 @@ def create_backlog_secondary_index(conn, dbname):
logger.info('Create `backlog` secondary index.')
# to order transactions by timestamp
conn[dbname]['backlog'].create_index('transaction.timestamp', ASCENDING,
name='transaction_timestamp')
conn.conn[dbname]['backlog']\
.create_index([('transaction.timestamp', ASCENDING)],
name='transaction_timestamp')
# compound index to read transactions from the backlog per assignee
conn[dbname]['backlog']\
.create_index(['assignee',
conn.conn[dbname]['backlog']\
.create_index([('assignee', ASCENDING),
('assignment_timestamp', DESCENDING)],
name='assignee__transaction_timestamp')
@@ -95,10 +98,12 @@ def create_votes_secondary_index(conn, dbname):
logger.info('Create `votes` secondary index.')
# index on block id to quickly poll
conn[dbname]['votes'].create_index('vote.voting_for_block',
conn.conn[dbname]['votes'].create_index('vote.voting_for_block',
name='voting_for')
# is the first index redundant then?
# compound index to order votes by block id and node
conn[dbname]['votes'].create_index(['vote.voting_for_block',
'node_pubkey'], name='block_and_voter')
conn.conn[dbname]['votes']\
.create_index([('vote.voting_for_block', ASCENDING),
('node_pubkey', ASCENDING)],
name='block_and_voter')

View File

@@ -64,6 +64,9 @@ def restore_config(request, node_config):
def node_config(request):
config = copy.deepcopy(CONFIG)
config['database']['backend'] = request.config.getoption('--database-backend')
if config['database']['backend'] == 'mongodb':
# not a great way to do this
config['database']['port'] = 27017
return config