Sqlstore refactor (#11908)

* refactor: tracing service refactoring

* refactor: sqlstore to instance service

* refactor: sqlstore & registry priority

* refactor: sqlstore refactor wip

* sqlstore: progress on getting tests to work again

* sqlstore: progress on refactoring and getting tests working

* sqlstore: connection string fix

* fix: unclear why this test was failing; required changing expires

* fix: updated grafana-cli
pull/11988/head
Torkel Ödegaard 7 years ago committed by GitHub
parent 78e837cf39
commit 6c0752473a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
  1. 4
      pkg/cmd/grafana-cli/commands/commands.go
  2. 25
      pkg/cmd/grafana-server/server.go
  3. 39
      pkg/registry/registry.go
  4. 13
      pkg/services/sqlstore/dashboard_snapshot_test.go
  5. 2
      pkg/services/sqlstore/migrations/migrations_test.go
  6. 21
      pkg/services/sqlstore/migrations/org_mig.go
  7. 2
      pkg/services/sqlstore/migrator/migrator.go
  8. 1
      pkg/services/sqlstore/quota_test.go
  9. 301
      pkg/services/sqlstore/sqlstore.go
  10. 4
      pkg/setting/setting.go
  11. 7
      public/app/core/specs/file_export.jest.ts
  12. 2
      public/app/features/templating/template_srv.ts
  13. 2
      public/app/plugins/datasource/prometheus/specs/result_transformer.jest.ts

@ -22,7 +22,9 @@ func runDbCommand(command func(commandLine CommandLine) error) func(context *cli
Args: flag.Args(), Args: flag.Args(),
}) })
sqlstore.NewEngine() engine := &sqlstore.SqlStore{}
engine.Cfg = cfg
engine.Init()
if err := command(cmd); err != nil { if err := command(cmd); err != nil {
logger.Errorf("\n%s: ", color.RedString("Error")) logger.Errorf("\n%s: ", color.RedString("Error"))

@ -8,7 +8,6 @@ import (
"net" "net"
"os" "os"
"path/filepath" "path/filepath"
"reflect"
"strconv" "strconv"
"time" "time"
@ -23,7 +22,6 @@ import (
"github.com/grafana/grafana/pkg/api" "github.com/grafana/grafana/pkg/api"
"github.com/grafana/grafana/pkg/log" "github.com/grafana/grafana/pkg/log"
"github.com/grafana/grafana/pkg/login" "github.com/grafana/grafana/pkg/login"
"github.com/grafana/grafana/pkg/services/sqlstore"
"github.com/grafana/grafana/pkg/setting" "github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/social" "github.com/grafana/grafana/pkg/social"
@ -37,6 +35,7 @@ import (
_ "github.com/grafana/grafana/pkg/services/notifications" _ "github.com/grafana/grafana/pkg/services/notifications"
_ "github.com/grafana/grafana/pkg/services/provisioning" _ "github.com/grafana/grafana/pkg/services/provisioning"
_ "github.com/grafana/grafana/pkg/services/search" _ "github.com/grafana/grafana/pkg/services/search"
_ "github.com/grafana/grafana/pkg/services/sqlstore"
_ "github.com/grafana/grafana/pkg/tracing" _ "github.com/grafana/grafana/pkg/tracing"
) )
@ -70,10 +69,6 @@ func (g *GrafanaServerImpl) Run() error {
g.loadConfiguration() g.loadConfiguration()
g.writePIDFile() g.writePIDFile()
// initSql
sqlstore.NewEngine() // TODO: this should return an error
sqlstore.EnsureAdminUser()
login.Init() login.Init()
social.NewOAuthService() social.NewOAuthService()
@ -88,7 +83,7 @@ func (g *GrafanaServerImpl) Run() error {
// Add all services to dependency graph // Add all services to dependency graph
for _, service := range services { for _, service := range services {
serviceGraph.Provide(&inject.Object{Value: service}) serviceGraph.Provide(&inject.Object{Value: service.Instance})
} }
serviceGraph.Provide(&inject.Object{Value: g}) serviceGraph.Provide(&inject.Object{Value: g})
@ -100,25 +95,25 @@ func (g *GrafanaServerImpl) Run() error {
// Init & start services // Init & start services
for _, service := range services { for _, service := range services {
if registry.IsDisabled(service) { if registry.IsDisabled(service.Instance) {
continue continue
} }
g.log.Info("Initializing " + reflect.TypeOf(service).Elem().Name()) g.log.Info("Initializing " + service.Name)
if err := service.Init(); err != nil { if err := service.Instance.Init(); err != nil {
return fmt.Errorf("Service init failed: %v", err) return fmt.Errorf("Service init failed: %v", err)
} }
} }
// Start background services // Start background services
for index := range services { for _, descriptor := range services {
service, ok := services[index].(registry.BackgroundService) service, ok := descriptor.Instance.(registry.BackgroundService)
if !ok { if !ok {
continue continue
} }
if registry.IsDisabled(services[index]) { if registry.IsDisabled(descriptor.Instance) {
continue continue
} }
@ -133,9 +128,9 @@ func (g *GrafanaServerImpl) Run() error {
// If error is not canceled then the service crashed // If error is not canceled then the service crashed
if err != context.Canceled && err != nil { if err != context.Canceled && err != nil {
g.log.Error("Stopped "+reflect.TypeOf(service).Elem().Name(), "reason", err) g.log.Error("Stopped "+descriptor.Name, "reason", err)
} else { } else {
g.log.Info("Stopped "+reflect.TypeOf(service).Elem().Name(), "reason", err) g.log.Info("Stopped "+descriptor.Name, "reason", err)
} }
// Mark that we are in shutdown mode // Mark that we are in shutdown mode

@ -2,15 +2,35 @@ package registry
import ( import (
"context" "context"
"reflect"
"sort"
) )
var services = []Service{} type Descriptor struct {
Name string
Instance Service
InitPriority Priority
}
var services []*Descriptor
func RegisterService(srv Service) { func RegisterService(instance Service) {
services = append(services, srv) services = append(services, &Descriptor{
Name: reflect.TypeOf(instance).Elem().Name(),
Instance: instance,
InitPriority: Low,
})
} }
func GetServices() []Service { func Register(descriptor *Descriptor) {
services = append(services, descriptor)
}
func GetServices() []*Descriptor {
sort.Slice(services, func(i, j int) bool {
return services[i].InitPriority > services[j].InitPriority
})
return services return services
} }
@ -27,7 +47,18 @@ type BackgroundService interface {
Run(ctx context.Context) error Run(ctx context.Context) error
} }
type HasInitPriority interface {
GetInitPriority() Priority
}
func IsDisabled(srv Service) bool { func IsDisabled(srv Service) bool {
canBeDisabled, ok := srv.(CanBeDisabled) canBeDisabled, ok := srv.(CanBeDisabled)
return ok && canBeDisabled.IsDisabled() return ok && canBeDisabled.IsDisabled()
} }
type Priority int
const (
High Priority = 100
Low Priority = 0
)

@ -4,7 +4,6 @@ import (
"testing" "testing"
"time" "time"
"github.com/go-xorm/xorm"
. "github.com/smartystreets/goconvey/convey" . "github.com/smartystreets/goconvey/convey"
"github.com/grafana/grafana/pkg/components/simplejson" "github.com/grafana/grafana/pkg/components/simplejson"
@ -110,14 +109,14 @@ func TestDashboardSnapshotDBAccess(t *testing.T) {
} }
func TestDeleteExpiredSnapshots(t *testing.T) { func TestDeleteExpiredSnapshots(t *testing.T) {
x := InitTestDB(t) sqlstore := InitTestDB(t)
Convey("Testing dashboard snapshots clean up", t, func() { Convey("Testing dashboard snapshots clean up", t, func() {
setting.SnapShotRemoveExpired = true setting.SnapShotRemoveExpired = true
notExpiredsnapshot := createTestSnapshot(x, "key1", 48000) notExpiredsnapshot := createTestSnapshot(sqlstore, "key1", 48000)
createTestSnapshot(x, "key2", -1200) createTestSnapshot(sqlstore, "key2", -1200)
createTestSnapshot(x, "key3", -1200) createTestSnapshot(sqlstore, "key3", -1200)
err := DeleteExpiredSnapshots(&m.DeleteExpiredSnapshotsCommand{}) err := DeleteExpiredSnapshots(&m.DeleteExpiredSnapshotsCommand{})
So(err, ShouldBeNil) So(err, ShouldBeNil)
@ -146,7 +145,7 @@ func TestDeleteExpiredSnapshots(t *testing.T) {
}) })
} }
func createTestSnapshot(x *xorm.Engine, key string, expires int64) *m.DashboardSnapshot { func createTestSnapshot(sqlstore *SqlStore, key string, expires int64) *m.DashboardSnapshot {
cmd := m.CreateDashboardSnapshotCommand{ cmd := m.CreateDashboardSnapshotCommand{
Key: key, Key: key,
DeleteKey: "delete" + key, DeleteKey: "delete" + key,
@ -163,7 +162,7 @@ func createTestSnapshot(x *xorm.Engine, key string, expires int64) *m.DashboardS
// Set expiry date manually - to be able to create expired snapshots // Set expiry date manually - to be able to create expired snapshots
if expires < 0 { if expires < 0 {
expireDate := time.Now().Add(time.Second * time.Duration(expires)) expireDate := time.Now().Add(time.Second * time.Duration(expires))
_, err = x.Exec("UPDATE dashboard_snapshot SET expires = ? WHERE id = ?", expireDate, cmd.Result.Id) _, err = sqlstore.engine.Exec("UPDATE dashboard_snapshot SET expires = ? WHERE id = ?", expireDate, cmd.Result.Id)
So(err, ShouldBeNil) So(err, ShouldBeNil)
} }

@ -39,7 +39,7 @@ func TestMigrations(t *testing.T) {
has, err := x.SQL(sql).Get(&r) has, err := x.SQL(sql).Get(&r)
So(err, ShouldBeNil) So(err, ShouldBeNil)
So(has, ShouldBeTrue) So(has, ShouldBeTrue)
expectedMigrations := mg.MigrationsCount() - 2 //we currently skip to migrations. We should rewrite skipped migrations to write in the log as well. until then we have to keep this expectedMigrations := mg.MigrationsCount() //we currently skip to migrations. We should rewrite skipped migrations to write in the log as well. until then we have to keep this
So(r.Count, ShouldEqual, expectedMigrations) So(r.Count, ShouldEqual, expectedMigrations)
mg = NewMigrator(x) mg = NewMigrator(x)

@ -48,27 +48,6 @@ func addOrgMigrations(mg *Migrator) {
mg.AddMigration("create org_user table v1", NewAddTableMigration(orgUserV1)) mg.AddMigration("create org_user table v1", NewAddTableMigration(orgUserV1))
addTableIndicesMigrations(mg, "v1", orgUserV1) addTableIndicesMigrations(mg, "v1", orgUserV1)
//------- copy data from old table-------------------
mg.AddMigration("copy data account to org", NewCopyTableDataMigration("org", "account", map[string]string{
"id": "id",
"version": "version",
"name": "name",
"created": "created",
"updated": "updated",
}).IfTableExists("account"))
mg.AddMigration("copy data account_user to org_user", NewCopyTableDataMigration("org_user", "account_user", map[string]string{
"id": "id",
"org_id": "account_id",
"user_id": "user_id",
"role": "role",
"created": "created",
"updated": "updated",
}).IfTableExists("account_user"))
mg.AddMigration("Drop old table account", NewDropTableMigration("account"))
mg.AddMigration("Drop old table account_user", NewDropTableMigration("account_user"))
mg.AddMigration("Update org table charset", NewTableCharsetMigration("org", []*Column{ mg.AddMigration("Update org table charset", NewTableCharsetMigration("org", []*Column{
{Name: "name", Type: DB_NVarchar, Length: 190, Nullable: false}, {Name: "name", Type: DB_NVarchar, Length: 190, Nullable: false},
{Name: "address1", Type: DB_NVarchar, Length: 255, Nullable: true}, {Name: "address1", Type: DB_NVarchar, Length: 255, Nullable: true},

@ -125,7 +125,7 @@ func (mg *Migrator) exec(m Migration, sess *xorm.Session) error {
sql, args := condition.Sql(mg.dialect) sql, args := condition.Sql(mg.dialect)
results, err := sess.SQL(sql).Query(args...) results, err := sess.SQL(sql).Query(args...)
if err != nil || len(results) == 0 { if err != nil || len(results) == 0 {
mg.Logger.Info("Skipping migration condition not fulfilled", "id", m.Id()) mg.Logger.Debug("Skipping migration condition not fulfilled", "id", m.Id())
return sess.Rollback() return sess.Rollback()
} }
} }

@ -43,6 +43,7 @@ func TestQuotaCommandsAndQueries(t *testing.T) {
Name: "TestOrg", Name: "TestOrg",
UserId: 1, UserId: 1,
} }
err := CreateOrg(&userCmd) err := CreateOrg(&userCmd)
So(err, ShouldBeNil) So(err, ShouldBeNil)
orgId = userCmd.Result.Id orgId = userCmd.Result.Id

@ -13,6 +13,7 @@ import (
"github.com/grafana/grafana/pkg/bus" "github.com/grafana/grafana/pkg/bus"
"github.com/grafana/grafana/pkg/log" "github.com/grafana/grafana/pkg/log"
m "github.com/grafana/grafana/pkg/models" m "github.com/grafana/grafana/pkg/models"
"github.com/grafana/grafana/pkg/registry"
"github.com/grafana/grafana/pkg/services/annotations" "github.com/grafana/grafana/pkg/services/annotations"
"github.com/grafana/grafana/pkg/services/sqlstore/migrations" "github.com/grafana/grafana/pkg/services/sqlstore/migrations"
"github.com/grafana/grafana/pkg/services/sqlstore/migrator" "github.com/grafana/grafana/pkg/services/sqlstore/migrator"
@ -27,151 +28,164 @@ import (
_ "github.com/grafana/grafana/pkg/tsdb/mssql" _ "github.com/grafana/grafana/pkg/tsdb/mssql"
) )
type DatabaseConfig struct {
Type, Host, Name, User, Pwd, Path, SslMode string
CaCertPath string
ClientKeyPath string
ClientCertPath string
ServerCertName string
MaxOpenConn int
MaxIdleConn int
ConnMaxLifetime int
}
var ( var (
x *xorm.Engine x *xorm.Engine
dialect migrator.Dialect dialect migrator.Dialect
HasEngine bool sqlog log.Logger = log.New("sqlstore")
DbCfg DatabaseConfig
UseSQLite3 bool
sqlog log.Logger = log.New("sqlstore")
) )
func EnsureAdminUser() { func init() {
statsQuery := m.GetSystemStatsQuery{} registry.Register(&registry.Descriptor{
Name: "SqlStore",
if err := bus.Dispatch(&statsQuery); err != nil { Instance: &SqlStore{},
log.Fatal(3, "Could not determine if admin user exists: %v", err) InitPriority: registry.High,
return })
} }
if statsQuery.Result.Users > 0 {
return
}
cmd := m.CreateUserCommand{}
cmd.Login = setting.AdminUser
cmd.Email = setting.AdminUser + "@localhost"
cmd.Password = setting.AdminPassword
cmd.IsAdmin = true
if err := bus.Dispatch(&cmd); err != nil { type SqlStore struct {
log.Error(3, "Failed to create default admin user", err) Cfg *setting.Cfg `inject:""`
return
}
log.Info("Created default admin user: %v", setting.AdminUser) dbCfg DatabaseConfig
engine *xorm.Engine
log log.Logger
skipEnsureAdmin bool
} }
func NewEngine() *xorm.Engine { func (ss *SqlStore) Init() error {
x, err := getEngine() ss.log = log.New("sqlstore")
ss.readConfig()
if err != nil { engine, err := ss.getEngine()
sqlog.Crit("Fail to connect to database", "error", err)
os.Exit(1)
}
err = SetEngine(x)
if err != nil { if err != nil {
sqlog.Error("Fail to initialize orm engine", "error", err) return fmt.Errorf("Fail to connect to database: %v", err)
os.Exit(1)
} }
return x ss.engine = engine
}
func SetEngine(engine *xorm.Engine) (err error) { // temporarily still set global var
x = engine x = engine
dialect = migrator.NewDialect(x) dialect = migrator.NewDialect(x)
migrator := migrator.NewMigrator(x) migrator := migrator.NewMigrator(x)
migrations.AddMigrations(migrator) migrations.AddMigrations(migrator)
if err := migrator.Start(); err != nil { if err := migrator.Start(); err != nil {
return fmt.Errorf("Sqlstore::Migration failed err: %v\n", err) return fmt.Errorf("Migration failed err: %v", err)
} }
// Init repo instances // Init repo instances
annotations.SetRepository(&SqlAnnotationRepo{}) annotations.SetRepository(&SqlAnnotationRepo{})
// ensure admin user
if ss.skipEnsureAdmin {
return nil
}
return ss.ensureAdminUser()
}
func (ss *SqlStore) ensureAdminUser() error {
statsQuery := m.GetSystemStatsQuery{}
if err := bus.Dispatch(&statsQuery); err != nil {
fmt.Errorf("Could not determine if admin user exists: %v", err)
}
if statsQuery.Result.Users > 0 {
return nil
}
cmd := m.CreateUserCommand{}
cmd.Login = setting.AdminUser
cmd.Email = setting.AdminUser + "@localhost"
cmd.Password = setting.AdminPassword
cmd.IsAdmin = true
if err := bus.Dispatch(&cmd); err != nil {
return fmt.Errorf("Failed to create admin user: %v", err)
}
ss.log.Info("Created default admin user: %v", setting.AdminUser)
return nil return nil
} }
func getEngine() (*xorm.Engine, error) { func (ss *SqlStore) buildConnectionString() (string, error) {
LoadConfig() cnnstr := ss.dbCfg.ConnectionString
cnnstr := "" // special case used by integration tests
switch DbCfg.Type { if cnnstr != "" {
return cnnstr, nil
}
switch ss.dbCfg.Type {
case migrator.MYSQL: case migrator.MYSQL:
protocol := "tcp" protocol := "tcp"
if strings.HasPrefix(DbCfg.Host, "/") { if strings.HasPrefix(ss.dbCfg.Host, "/") {
protocol = "unix" protocol = "unix"
} }
cnnstr = fmt.Sprintf("%s:%s@%s(%s)/%s?collation=utf8mb4_unicode_ci&allowNativePasswords=true", cnnstr = fmt.Sprintf("%s:%s@%s(%s)/%s?collation=utf8mb4_unicode_ci&allowNativePasswords=true",
url.QueryEscape(DbCfg.User), url.QueryEscape(DbCfg.Pwd), protocol, DbCfg.Host, url.PathEscape(DbCfg.Name)) ss.dbCfg.User, ss.dbCfg.Pwd, protocol, ss.dbCfg.Host, ss.dbCfg.Name)
if DbCfg.SslMode == "true" || DbCfg.SslMode == "skip-verify" { if ss.dbCfg.SslMode == "true" || ss.dbCfg.SslMode == "skip-verify" {
tlsCert, err := makeCert("custom", DbCfg) tlsCert, err := makeCert("custom", ss.dbCfg)
if err != nil { if err != nil {
return nil, err return "", err
} }
mysql.RegisterTLSConfig("custom", tlsCert) mysql.RegisterTLSConfig("custom", tlsCert)
cnnstr += "&tls=custom" cnnstr += "&tls=custom"
} }
case migrator.POSTGRES: case migrator.POSTGRES:
var host, port = "127.0.0.1", "5432" var host, port = "127.0.0.1", "5432"
fields := strings.Split(DbCfg.Host, ":") fields := strings.Split(ss.dbCfg.Host, ":")
if len(fields) > 0 && len(strings.TrimSpace(fields[0])) > 0 { if len(fields) > 0 && len(strings.TrimSpace(fields[0])) > 0 {
host = fields[0] host = fields[0]
} }
if len(fields) > 1 && len(strings.TrimSpace(fields[1])) > 0 { if len(fields) > 1 && len(strings.TrimSpace(fields[1])) > 0 {
port = fields[1] port = fields[1]
} }
cnnstr = fmt.Sprintf("user='%s' password='%s' host='%s' port='%s' dbname='%s' sslmode='%s' sslcert='%s' sslkey='%s' sslrootcert='%s'", if ss.dbCfg.Pwd == "" {
strings.Replace(DbCfg.User, `'`, `\'`, -1), ss.dbCfg.Pwd = "''"
strings.Replace(DbCfg.Pwd, `'`, `\'`, -1), }
strings.Replace(host, `'`, `\'`, -1), if ss.dbCfg.User == "" {
strings.Replace(port, `'`, `\'`, -1), ss.dbCfg.User = "''"
strings.Replace(DbCfg.Name, `'`, `\'`, -1), }
strings.Replace(DbCfg.SslMode, `'`, `\'`, -1), cnnstr = fmt.Sprintf("user=%s password=%s host=%s port=%s dbname=%s sslmode=%s sslcert=%s sslkey=%s sslrootcert=%s", ss.dbCfg.User, ss.dbCfg.Pwd, host, port, ss.dbCfg.Name, ss.dbCfg.SslMode, ss.dbCfg.ClientCertPath, ss.dbCfg.ClientKeyPath, ss.dbCfg.CaCertPath)
strings.Replace(DbCfg.ClientCertPath, `'`, `\'`, -1),
strings.Replace(DbCfg.ClientKeyPath, `'`, `\'`, -1),
strings.Replace(DbCfg.CaCertPath, `'`, `\'`, -1),
)
case migrator.SQLITE: case migrator.SQLITE:
if !filepath.IsAbs(DbCfg.Path) { // special case for tests
DbCfg.Path = filepath.Join(setting.DataPath, DbCfg.Path) if !filepath.IsAbs(ss.dbCfg.Path) {
ss.dbCfg.Path = filepath.Join(setting.DataPath, ss.dbCfg.Path)
} }
os.MkdirAll(path.Dir(DbCfg.Path), os.ModePerm) os.MkdirAll(path.Dir(ss.dbCfg.Path), os.ModePerm)
cnnstr = "file:" + DbCfg.Path + "?cache=shared&mode=rwc" cnnstr = "file:" + ss.dbCfg.Path + "?cache=shared&mode=rwc"
default: default:
return nil, fmt.Errorf("Unknown database type: %s", DbCfg.Type) return "", fmt.Errorf("Unknown database type: %s", ss.dbCfg.Type)
} }
sqlog.Info("Initializing DB", "dbtype", DbCfg.Type) return cnnstr, nil
engine, err := xorm.NewEngine(DbCfg.Type, cnnstr) }
func (ss *SqlStore) getEngine() (*xorm.Engine, error) {
connectionString, err := ss.buildConnectionString()
if err != nil { if err != nil {
return nil, err return nil, err
} }
engine.SetMaxOpenConns(DbCfg.MaxOpenConn) sqlog.Info("Connecting to DB", "dbtype", ss.dbCfg.Type)
engine.SetMaxIdleConns(DbCfg.MaxIdleConn) engine, err := xorm.NewEngine(ss.dbCfg.Type, connectionString)
engine.SetConnMaxLifetime(time.Second * time.Duration(DbCfg.ConnMaxLifetime)) if err != nil {
debugSql := setting.Raw.Section("database").Key("log_queries").MustBool(false) return nil, err
}
engine.SetMaxOpenConns(ss.dbCfg.MaxOpenConn)
engine.SetMaxIdleConns(ss.dbCfg.MaxIdleConn)
engine.SetConnMaxLifetime(time.Second * time.Duration(ss.dbCfg.ConnMaxLifetime))
// configure sql logging
debugSql := ss.Cfg.Raw.Section("database").Key("log_queries").MustBool(false)
if !debugSql { if !debugSql {
engine.SetLogger(&xorm.DiscardLogger{}) engine.SetLogger(&xorm.DiscardLogger{})
} else { } else {
@ -183,95 +197,90 @@ func getEngine() (*xorm.Engine, error) {
return engine, nil return engine, nil
} }
func LoadConfig() { func (ss *SqlStore) readConfig() {
sec := setting.Raw.Section("database") sec := ss.Cfg.Raw.Section("database")
cfgURL := sec.Key("url").String() cfgURL := sec.Key("url").String()
if len(cfgURL) != 0 { if len(cfgURL) != 0 {
dbURL, _ := url.Parse(cfgURL) dbURL, _ := url.Parse(cfgURL)
DbCfg.Type = dbURL.Scheme ss.dbCfg.Type = dbURL.Scheme
DbCfg.Host = dbURL.Host ss.dbCfg.Host = dbURL.Host
pathSplit := strings.Split(dbURL.Path, "/") pathSplit := strings.Split(dbURL.Path, "/")
if len(pathSplit) > 1 { if len(pathSplit) > 1 {
DbCfg.Name = pathSplit[1] ss.dbCfg.Name = pathSplit[1]
} }
userInfo := dbURL.User userInfo := dbURL.User
if userInfo != nil { if userInfo != nil {
DbCfg.User = userInfo.Username() ss.dbCfg.User = userInfo.Username()
DbCfg.Pwd, _ = userInfo.Password() ss.dbCfg.Pwd, _ = userInfo.Password()
} }
} else { } else {
DbCfg.Type = sec.Key("type").String() ss.dbCfg.Type = sec.Key("type").String()
DbCfg.Host = sec.Key("host").String() ss.dbCfg.Host = sec.Key("host").String()
DbCfg.Name = sec.Key("name").String() ss.dbCfg.Name = sec.Key("name").String()
DbCfg.User = sec.Key("user").String() ss.dbCfg.User = sec.Key("user").String()
if len(DbCfg.Pwd) == 0 { ss.dbCfg.ConnectionString = sec.Key("connection_string").String()
DbCfg.Pwd = sec.Key("password").String() ss.dbCfg.Pwd = sec.Key("password").String()
}
}
DbCfg.MaxOpenConn = sec.Key("max_open_conn").MustInt(0)
DbCfg.MaxIdleConn = sec.Key("max_idle_conn").MustInt(0)
DbCfg.ConnMaxLifetime = sec.Key("conn_max_lifetime").MustInt(14400)
if DbCfg.Type == "sqlite3" {
UseSQLite3 = true
// only allow one connection as sqlite3 has multi threading issues that cause table locks
// DbCfg.MaxIdleConn = 1
// DbCfg.MaxOpenConn = 1
} }
DbCfg.SslMode = sec.Key("ssl_mode").String()
DbCfg.CaCertPath = sec.Key("ca_cert_path").String() ss.dbCfg.MaxOpenConn = sec.Key("max_open_conn").MustInt(0)
DbCfg.ClientKeyPath = sec.Key("client_key_path").String() ss.dbCfg.MaxIdleConn = sec.Key("max_idle_conn").MustInt(2)
DbCfg.ClientCertPath = sec.Key("client_cert_path").String() ss.dbCfg.ConnMaxLifetime = sec.Key("conn_max_lifetime").MustInt(14400)
DbCfg.ServerCertName = sec.Key("server_cert_name").String()
DbCfg.Path = sec.Key("path").MustString("data/grafana.db") ss.dbCfg.SslMode = sec.Key("ssl_mode").String()
ss.dbCfg.CaCertPath = sec.Key("ca_cert_path").String()
ss.dbCfg.ClientKeyPath = sec.Key("client_key_path").String()
ss.dbCfg.ClientCertPath = sec.Key("client_cert_path").String()
ss.dbCfg.ServerCertName = sec.Key("server_cert_name").String()
ss.dbCfg.Path = sec.Key("path").MustString("data/grafana.db")
} }
func InitTestDB(t *testing.T) *xorm.Engine { func InitTestDB(t *testing.T) *SqlStore {
selectedDb := migrator.SQLITE sqlstore := &SqlStore{}
// selectedDb := migrator.MYSQL sqlstore.skipEnsureAdmin = true
// selectedDb := migrator.POSTGRES
var x *xorm.Engine dbType := migrator.SQLITE
var err error
// environment variable present for test db? // environment variable present for test db?
if db, present := os.LookupEnv("GRAFANA_TEST_DB"); present { if db, present := os.LookupEnv("GRAFANA_TEST_DB"); present {
selectedDb = db dbType = db
} }
switch strings.ToLower(selectedDb) { // set test db config
case migrator.MYSQL: sqlstore.Cfg = setting.NewCfg()
x, err = xorm.NewEngine(sqlutil.TestDB_Mysql.DriverName, sqlutil.TestDB_Mysql.ConnStr) sec, _ := sqlstore.Cfg.Raw.NewSection("database")
case migrator.POSTGRES: sec.NewKey("type", dbType)
x, err = xorm.NewEngine(sqlutil.TestDB_Postgres.DriverName, sqlutil.TestDB_Postgres.ConnStr)
switch dbType {
case "mysql":
sec.NewKey("connection_string", sqlutil.TestDB_Mysql.ConnStr)
case "postgres":
sec.NewKey("connection_string", sqlutil.TestDB_Postgres.ConnStr)
default: default:
x, err = xorm.NewEngine(sqlutil.TestDB_Sqlite3.DriverName, sqlutil.TestDB_Sqlite3.ConnStr) sec.NewKey("connection_string", sqlutil.TestDB_Sqlite3.ConnStr)
} }
x.DatabaseTZ = time.UTC // need to get engine to clean db before we init
x.TZLocation = time.UTC engine, err := xorm.NewEngine(dbType, sec.Key("connection_string").String())
if err != nil { if err != nil {
t.Fatalf("Failed to init test database: %v", err) t.Fatalf("Failed to init test database: %v", err)
} }
dialect = migrator.NewDialect(x) dialect = migrator.NewDialect(engine)
if err := dialect.CleanDB(); err != nil {
err = dialect.CleanDB()
if err != nil {
t.Fatalf("Failed to clean test db %v", err) t.Fatalf("Failed to clean test db %v", err)
} }
if err := SetEngine(x); err != nil { if err := sqlstore.Init(); err != nil {
t.Fatal(err) t.Fatalf("Failed to init test database: %v", err)
} }
// x.ShowSQL() //// sqlstore.engine.DatabaseTZ = time.UTC
//// sqlstore.engine.TZLocation = time.UTC
return x return sqlstore
} }
func IsTestDbMySql() bool { func IsTestDbMySql() bool {
@ -289,3 +298,15 @@ func IsTestDbPostgres() bool {
return false return false
} }
type DatabaseConfig struct {
Type, Host, Name, User, Pwd, Path, SslMode string
CaCertPath string
ClientKeyPath string
ClientCertPath string
ServerCertName string
ConnectionString string
MaxOpenConn int
MaxIdleConn int
ConnMaxLifetime int
}

@ -495,7 +495,9 @@ func validateStaticRootPath() error {
} }
func NewCfg() *Cfg { func NewCfg() *Cfg {
return &Cfg{} return &Cfg{
Raw: ini.Empty(),
}
} }
func (cfg *Cfg) Load(args *CommandLineArgs) error { func (cfg *Cfg) Load(args *CommandLineArgs) error {

@ -63,7 +63,6 @@ describe('file_export', () => {
}); });
describe('when exporting table data to csv', () => { describe('when exporting table data to csv', () => {
it('should properly escape special characters and quote all string values', () => { it('should properly escape special characters and quote all string values', () => {
const inputTable = { const inputTable = {
columns: [ columns: [
@ -104,13 +103,11 @@ describe('file_export', () => {
it('should decode HTML encoded characters', function() { it('should decode HTML encoded characters', function() {
const inputTable = { const inputTable = {
columns: [ columns: [{ text: 'string_value' }],
{ text: 'string_value' },
],
rows: [ rows: [
['&quot;&amp;&auml;'], ['&quot;&amp;&auml;'],
['<strong>&quot;some html&quot;</strong>'], ['<strong>&quot;some html&quot;</strong>'],
['<a href="http://something/index.html">some text</a>'] ['<a href="http://something/index.html">some text</a>'],
], ],
}; };

@ -75,7 +75,7 @@ export class TemplateSrv {
return luceneEscape(value); return luceneEscape(value);
} }
if (value instanceof Array && value.length === 0) { if (value instanceof Array && value.length === 0) {
return '__empty__'; return '__empty__';
} }
var quotedValues = _.map(value, function(val) { var quotedValues = _.map(value, function(val) {
return '"' + luceneEscape(val) + '"'; return '"' + luceneEscape(val) + '"';

@ -49,7 +49,7 @@ describe('Prometheus Result Transformer', () => {
}); });
it('should column title include refId if response count is more than 2', () => { it('should column title include refId if response count is more than 2', () => {
var table = ctx.resultTransformer.transformMetricDataToTable(response.data.result, 2, "B"); var table = ctx.resultTransformer.transformMetricDataToTable(response.data.result, 2, 'B');
expect(table.type).toBe('table'); expect(table.type).toBe('table');
expect(table.columns).toEqual([ expect(table.columns).toEqual([
{ text: 'Time', type: 'time' }, { text: 'Time', type: 'time' },

Loading…
Cancel
Save