Mirror of https://github.com/planetmint/planetmint-go.git (synced 2025-06-06 14:16:39 +00:00)

* added a MqttMonitor module with levelDB and periodic cleanup
* initialized it in the app
* passed it to the dao keeper
* added conversion methods (string2unixtime, byte ToJSON)
* removed obsolete keeper code
* made RDDLToken.Factor public
* added an explicit mqtt client to the monitor module
* restart the mqtt connection in the MqttMonitor on connection loss
* adjusted the mqttmock structure to be compatible
* added some linter exclusions to let the monitor tool pass
* created a MockMqttMonitor interface and mock object
* used this to pass tests
* made the MockMqttMonitor a global object so that it can be easily mocked
* removed the MockMqttMonitor from the app/keeper initialization
* adjusted test cases to register "active machines" with the MqttMonitor
* added a mutex in the mocks to protect against data races
* defined mocks for the dao tests
* clear separation between the interface and the MQTT monitor
* added another waiting block to ensure the tx went through; due to a multi-threading race condition, this sometimes failed during tests
* switched tests to in-memory storage (memstorage) instead of a file-based DB

Signed-off-by: Jürgen Eckel <juergen@riddleandcode.com>
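Below is a minimal sketch of the testing approach described above: a test double that satisfies MQTTMonitorClientI (defined in the file that follows) and is swapped in via SetMqttMonitorInstance. The mockMonitor type, the test package, and the import path github.com/planetmint/planetmint-go/monitor are assumptions for illustration; the repository's actual MockMqttMonitor may differ.

package monitor_test // hypothetical test package; the repo's tests may differ

import (
    "sync"

    sdk "github.com/cosmos/cosmos-sdk/types"

    "github.com/planetmint/planetmint-go/monitor"
)

// mockMonitor is an illustrative stand-in for the MockMqttMonitor mentioned
// above. Its mutex protects the participant map against data races when
// tests touch it from multiple goroutines.
type mockMonitor struct {
    mu           sync.Mutex
    participants map[string]int64
}

func (m *mockMonitor) AddParticipant(address string, lastSeenTS int64) (err error) {
    m.mu.Lock()
    defer m.mu.Unlock()
    m.participants[address] = lastSeenTS
    return nil
}

func (m *mockMonitor) SelectPoPParticipantsOutOfActiveActors() (challenger string, challengee string, err error) {
    // Deterministic stub: tests register their "active machines" up front
    // and can assert against fixed addresses.
    return "challenger-address", "challengee-address", nil
}

func (m *mockMonitor) SetContext(ctx sdk.Context) {}

func (m *mockMonitor) Start() (err error) { return nil }

// setupMockMonitor swaps the package-level instance for the mock so that
// every wrapper function in the monitor package operates on it.
func setupMockMonitor() *mockMonitor {
    m := &mockMonitor{participants: make(map[string]int64)}
    monitor.SetMqttMonitorInstance(m)
    return m
}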
74 lines | 1.7 KiB | Go
package monitor

import (
    "sync"

    sdk "github.com/cosmos/cosmos-sdk/types"
    "github.com/planetmint/planetmint-go/config"
    "github.com/syndtr/goleveldb/leveldb"
)
// MQTTMonitorClientI abstracts the MQTT monitor so that tests can swap in
// a mock implementation in place of the real service.
type MQTTMonitorClientI interface {
    AddParticipant(address string, lastSeenTS int64) (err error)
    SelectPoPParticipantsOutOfActiveActors() (challenger string, challengee string, err error)
    SetContext(ctx sdk.Context)
    Start() (err error)
}
// monitorMutex guards all access to the package-level monitor instance.
var monitorMutex sync.Mutex
var mqttMonitorInstance MQTTMonitorClientI
// SetMqttMonitorInstance replaces the global monitor instance, e.g. with
// a mock during tests.
func SetMqttMonitorInstance(monitorInstance MQTTMonitorClientI) {
    monitorMutex.Lock()
    defer monitorMutex.Unlock()
    mqttMonitorInstance = monitorInstance
}
// LazyMqttMonitorLoader initializes the global monitor on first use. The
// mutex is held across the whole check-and-initialize sequence so that two
// concurrent callers cannot both create an instance.
func LazyMqttMonitorLoader(homeDir string) {
    monitorMutex.Lock()
    defer monitorMutex.Unlock()
    if mqttMonitorInstance != nil {
        return
    }
    if homeDir == "" {
        homeDir = "./"
    }
    activeActorsDB, err := leveldb.OpenFile(homeDir+"activeActors.db", nil)
    if err != nil {
        panic(err)
    }
    mqttMonitorInstance = NewMqttMonitorService(activeActorsDB, *config.GetConfig())
    if err = mqttMonitorInstance.Start(); err != nil {
        panic(err)
    }
}
// SetContext hands the current SDK context to the monitor instance; it
// assumes the instance has been set via LazyMqttMonitorLoader or
// SetMqttMonitorInstance.
func SetContext(ctx sdk.Context) {
    monitorMutex.Lock()
    defer monitorMutex.Unlock()
    mqttMonitorInstance.SetContext(ctx)
}
// SelectPoPParticipantsOutOfActiveActors picks the challenger and the
// challengee for the next PoP round from the currently active actors.
func SelectPoPParticipantsOutOfActiveActors() (challenger string, challengee string, err error) {
    monitorMutex.Lock()
    defer monitorMutex.Unlock()
    return mqttMonitorInstance.SelectPoPParticipantsOutOfActiveActors()
}
// Start starts the underlying monitor service.
func Start() (err error) {
    monitorMutex.Lock()
    defer monitorMutex.Unlock()
    return mqttMonitorInstance.Start()
}
// AddParticipant records an address and its last-seen Unix timestamp in
// the active-actors store.
func AddParticipant(address string, lastSeenTS int64) (err error) {
    monitorMutex.Lock()
    defer monitorMutex.Unlock()
    return mqttMonitorInstance.AddParticipant(address, lastSeenTS)
}
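For context, a usage sketch from a consumer's point of view. The keeper package, the selectPoPCandidates function, and the import path are hypothetical; only the monitor package API above is taken from the source.

package keeper // hypothetical consumer, e.g. the dao keeper

import (
    sdk "github.com/cosmos/cosmos-sdk/types"

    "github.com/planetmint/planetmint-go/monitor"
)

// selectPoPCandidates shows the intended call order: lazily load the
// monitor once, hand it the current block context, then draw the two
// PoP participants from the set of active actors.
func selectPoPCandidates(ctx sdk.Context, homeDir string) (challenger string, challengee string, err error) {
    monitor.LazyMqttMonitorLoader(homeDir) // no-op once an instance exists
    monitor.SetContext(ctx)
    return monitor.SelectPoPParticipantsOutOfActiveActors()
}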