Drone: Enable running go benchmarks on demand (#70359)

* Add benchmark

* Run pkg/api benchmarks in CI

* Conditionally run benchmarks for provided go packages

* Bypass fork check for promotes
This commit is contained in:
Sofia Papagiannaki 2023-07-07 11:55:01 +03:00 committed by GitHub
parent 536146de5f
commit e82d437e0e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 847 additions and 49 deletions

View File

@ -1159,6 +1159,169 @@ environment:
image_pull_secrets:
- dockerconfigjson
kind: pipeline
name: pr-integration-benchmarks
node:
type: no-parallel
platform:
arch: amd64
os: linux
services:
- environment:
PGDATA: /var/lib/postgresql/data/pgdata
POSTGRES_DB: grafanatest
POSTGRES_PASSWORD: grafanatest
POSTGRES_USER: grafanatest
image: postgres:12.3-alpine
name: postgres
volumes:
- name: postgres
path: /var/lib/postgresql/data/pgdata
- commands:
- docker-entrypoint.sh mysqld --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
environment:
MYSQL_DATABASE: grafana_tests
MYSQL_PASSWORD: password
MYSQL_ROOT_PASSWORD: rootpass
MYSQL_USER: grafana
image: mysql:5.7.39
name: mysql57
volumes:
- name: mysql57
path: /var/lib/mysql
- commands:
- docker-entrypoint.sh mysqld --default-authentication-plugin=mysql_native_password
environment:
MYSQL_DATABASE: grafana_tests
MYSQL_PASSWORD: password
MYSQL_ROOT_PASSWORD: rootpass
MYSQL_USER: grafana
image: mysql:8.0.32
name: mysql80
volumes:
- name: mysql80
path: /var/lib/mysql
- environment: {}
image: redis:6.2.11-alpine
name: redis
- environment: {}
image: memcached:1.6.9-alpine
name: memcached
steps:
- commands:
- git clone "https://$${GITHUB_TOKEN}@github.com/grafana/grafana-enterprise.git"
../grafana-enterprise
- cd ../grafana-enterprise
- if git checkout ${DRONE_SOURCE_BRANCH}; then echo "checked out ${DRONE_SOURCE_BRANCH}";
elif git checkout ${DRONE_TARGET_BRANCH}; then echo "git checkout ${DRONE_TARGET_BRANCH}";
else git checkout main; fi
- cd ../
- ln -s src grafana
- cd ./grafana-enterprise
- ./build.sh
environment:
GITHUB_TOKEN:
from_secret: github_token
failure: ignore
image: grafana/build-container:1.7.4
name: clone-enterprise
- commands:
- go build -o ./bin/build -ldflags '-extldflags -static' ./pkg/build/cmd
depends_on: []
environment:
CGO_ENABLED: 0
image: golang:1.20.4
name: compile-build-cmd
- commands:
- '# It is required that code generated from Thema/CUE be committed and in sync
with its inputs.'
- '# The following command will fail if running code generators produces any diff
in output.'
- CODEGEN_VERIFY=1 make gen-cue
depends_on:
- clone-enterprise
image: grafana/build-container:1.7.4
name: verify-gen-cue
- commands:
- '# It is required that generated jsonnet is committed and in sync with its inputs.'
- '# The following command will fail if running code generators produces any diff
in output.'
- CODEGEN_VERIFY=1 make gen-jsonnet
depends_on:
- clone-enterprise
image: grafana/build-container:1.7.4
name: verify-gen-jsonnet
- commands:
- make gen-go
depends_on:
- verify-gen-cue
image: grafana/build-container:1.7.4
name: wire-install
- commands:
- if [ -z ${GO_PACKAGES} ]; then echo 'missing GO_PACKAGES'; false; fi
- go test -v -run=^$ -benchmem -timeout=1h -count=8 -bench=. ${GO_PACKAGES}
depends_on:
- wire-install
image: grafana/build-container:1.7.4
name: sqlite-benchmark-integration-tests
- commands:
- if [ -z ${GO_PACKAGES} ]; then echo 'missing GO_PACKAGES'; false; fi
- go test -v -run=^$ -benchmem -timeout=1h -count=8 -bench=. ${GO_PACKAGES}
depends_on:
- wire-install
environment:
GRAFANA_TEST_DB: postgres
PGPASSWORD: grafanatest
POSTGRES_HOST: postgres
image: grafana/build-container:1.7.4
name: postgres-benchmark-integration-tests
- commands:
- if [ -z ${GO_PACKAGES} ]; then echo 'missing GO_PACKAGES'; false; fi
- go test -v -run=^$ -benchmem -timeout=1h -count=8 -bench=. ${GO_PACKAGES}
depends_on:
- wire-install
environment:
GRAFANA_TEST_DB: mysql
MYSQL_HOST: mysql57
image: grafana/build-container:1.7.4
name: mysql-5.7-benchmark-integration-tests
- commands:
- if [ -z ${GO_PACKAGES} ]; then echo 'missing GO_PACKAGES'; false; fi
- go test -v -run=^$ -benchmem -timeout=1h -count=8 -bench=. ${GO_PACKAGES}
depends_on:
- wire-install
environment:
GRAFANA_TEST_DB: mysql
MYSQL_HOST: mysql80
image: grafana/build-container:1.7.4
name: mysql-8.0-benchmark-integration-tests
trigger:
event:
- promote
target:
- gobenchmarks
type: docker
volumes:
- host:
path: /var/run/docker.sock
name: docker
- name: postgres
temp:
medium: memory
- name: mysql57
temp:
medium: memory
- name: mysql80
temp:
medium: memory
---
clone:
retries: 3
depends_on: []
environment:
EDITION: oss
image_pull_secrets:
- dockerconfigjson
kind: pipeline
name: main-docs
node:
type: no-parallel
@ -7189,6 +7352,6 @@ kind: secret
name: delivery-bot-app-private-key
---
kind: signature
hmac: 5b576f21a7afb08759ad4ac7b74a54afcfbf9b5eac0e5894857ce4cdd0feed50
hmac: 80038c08bedd62d8a0e4250e12e3dbb27712dd640694ff9f57ef14a82282ab76
...

View File

@ -0,0 +1,516 @@
package api
import (
"context"
"encoding/json"
"fmt"
"math/rand"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/grafana/grafana/pkg/api/dtos"
"github.com/grafana/grafana/pkg/api/routing"
"github.com/grafana/grafana/pkg/bus"
"github.com/grafana/grafana/pkg/components/simplejson"
"github.com/grafana/grafana/pkg/infra/localcache"
"github.com/grafana/grafana/pkg/infra/log"
"github.com/grafana/grafana/pkg/infra/tracing"
"github.com/grafana/grafana/pkg/services/accesscontrol"
"github.com/grafana/grafana/pkg/services/accesscontrol/acimpl"
acdb "github.com/grafana/grafana/pkg/services/accesscontrol/database"
"github.com/grafana/grafana/pkg/services/accesscontrol/ossaccesscontrol"
"github.com/grafana/grafana/pkg/services/contexthandler/ctxkey"
contextmodel "github.com/grafana/grafana/pkg/services/contexthandler/model"
"github.com/grafana/grafana/pkg/services/dashboards"
"github.com/grafana/grafana/pkg/services/dashboards/database"
dashboardservice "github.com/grafana/grafana/pkg/services/dashboards/service"
"github.com/grafana/grafana/pkg/services/featuremgmt"
"github.com/grafana/grafana/pkg/services/folder/folderimpl"
"github.com/grafana/grafana/pkg/services/guardian"
"github.com/grafana/grafana/pkg/services/licensing/licensingtest"
"github.com/grafana/grafana/pkg/services/org/orgimpl"
"github.com/grafana/grafana/pkg/services/quota/quotatest"
"github.com/grafana/grafana/pkg/services/search"
"github.com/grafana/grafana/pkg/services/sqlstore"
"github.com/grafana/grafana/pkg/services/star"
"github.com/grafana/grafana/pkg/services/star/startest"
"github.com/grafana/grafana/pkg/services/supportbundles/bundleregistry"
"github.com/grafana/grafana/pkg/services/tag/tagimpl"
"github.com/grafana/grafana/pkg/services/team"
"github.com/grafana/grafana/pkg/services/team/teamimpl"
"github.com/grafana/grafana/pkg/services/user"
"github.com/grafana/grafana/pkg/services/user/userimpl"
"github.com/grafana/grafana/pkg/setting"
"github.com/grafana/grafana/pkg/web"
"github.com/grafana/grafana/pkg/web/webtest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Sizes of the seeded benchmark data set. The LEVELn_* constants describe a
// three-level folder tree: LEVEL0_FOLDER_NUM root folders, each containing
// LEVEL1_FOLDER_NUM subfolders, each of those containing LEVEL2_FOLDER_NUM
// subfolders; every folder at level n additionally holds
// LEVELn_DASHBOARD_NUM dashboards.
const (
	LEVEL0_FOLDER_NUM    = 300
	LEVEL1_FOLDER_NUM    = 30
	LEVEL2_FOLDER_NUM    = 5
	LEVEL0_DASHBOARD_NUM = 300
	LEVEL1_DASHBOARD_NUM = 30
	LEVEL2_DASHBOARD_NUM = 5
	// TEAM_NUM teams are created; every team gets the same TEAM_MEMBER_NUM users as members.
	TEAM_NUM        = 50
	TEAM_MEMBER_NUM = 5
	// MAXIMUM_INT_POSTGRES is the largest value a postgres "integer" column can
	// hold; randomly generated row IDs are kept below it (see generateID).
	MAXIMUM_INT_POSTGRES = 2147483647
)
// benchScenario bundles the fixtures produced by setupDB that the individual
// benchmark cases need to build a server (setupServer) and issue requests.
type benchScenario struct {
	// db is the test database pre-seeded with folders, dashboards, teams,
	// roles and permissions.
	db *sqlstore.SQLStore
	// cfg is the Grafana configuration the services are constructed with.
	cfg *setting.Cfg
	// signedInUser is the user that is signed in to the server
	signedInUser *user.SignedInUser
	teamSvc      team.Service
	userSvc      user.Service
}
// BenchmarkFolderListAndSearch measures the GET /api/folders and
// GET /api/search endpoints against the database seeded by setupDB,
// comparing the nested folders feature flag enabled vs. disabled.
func BenchmarkFolderListAndSearch(b *testing.B) {
	start := time.Now()
	b.Log("setup start")
	sc := setupDB(b)
	b.Log("setup time:", time.Since(start))
	// all is the total number of (non-folder) dashboards created by setupDB
	// across the three folder levels.
	all := LEVEL0_FOLDER_NUM*LEVEL0_DASHBOARD_NUM + LEVEL0_FOLDER_NUM*LEVEL1_FOLDER_NUM*LEVEL1_DASHBOARD_NUM + LEVEL0_FOLDER_NUM*LEVEL1_FOLDER_NUM*LEVEL2_FOLDER_NUM*LEVEL2_DASHBOARD_NUM
	// the maximum number of dashboards that can be returned by the search API
	// otherwise the handler fails with 422 status code
	const limit = 5000
	// withLimit caps an expected result count at the search API page limit.
	withLimit := func(res int) int {
		if res > limit {
			return limit
		}
		return res
	}
	benchmarks := []struct {
		desc        string
		url         string
		expectedLen int
		features    *featuremgmt.FeatureManager
	}{
		{
			desc:        "get root folders with nested folders feature enabled",
			url:         "/api/folders",
			expectedLen: LEVEL0_FOLDER_NUM,
			features:    featuremgmt.WithFeatures("nestedFolders"),
		},
		{
			desc:        "get subfolders with nested folders feature enabled",
			url:         "/api/folders?parentUid=folder0",
			expectedLen: LEVEL1_FOLDER_NUM,
			features:    featuremgmt.WithFeatures("nestedFolders"),
		},
		{
			desc:        "list all inherited dashboards with nested folders feature enabled",
			url:         "/api/search?type=dash-db&limit=5000",
			expectedLen: withLimit(all),
			features:    featuremgmt.WithFeatures("nestedFolders"),
		},
		{
			desc:        "search for pattern with nested folders feature enabled",
			url:         "/api/search?type=dash-db&query=dashboard_0_0&limit=5000",
			expectedLen: withLimit(1 + LEVEL1_DASHBOARD_NUM + LEVEL2_FOLDER_NUM*LEVEL2_DASHBOARD_NUM),
			features:    featuremgmt.WithFeatures("nestedFolders"),
		},
		{
			desc:        "search for specific dashboard nested folders feature enabled",
			url:         "/api/search?type=dash-db&query=dashboard_0_0_0_0",
			expectedLen: 1,
			features:    featuremgmt.WithFeatures("nestedFolders"),
		},
		{
			desc:        "get root folders with nested folders feature disabled",
			url:         "/api/folders?limit=5000",
			expectedLen: withLimit(LEVEL0_FOLDER_NUM),
			features:    featuremgmt.WithFeatures(),
		},
		{
			desc:        "list all dashboards with nested folders feature disabled",
			url:         "/api/search?type=dash-db&limit=5000",
			expectedLen: withLimit(LEVEL0_FOLDER_NUM * LEVEL0_DASHBOARD_NUM),
			features:    featuremgmt.WithFeatures(),
		},
		{
			desc:        "search specific dashboard with nested folders feature disabled",
			url:         "/api/search?type=dash-db&query=dashboard_0_0",
			expectedLen: 1,
			features:    featuremgmt.WithFeatures(),
		},
	}
	for _, bm := range benchmarks {
		b.Run(bm.desc, func(b *testing.B) {
			m := setupServer(b, sc, bm.features)
			req := httptest.NewRequest(http.MethodGet, bm.url, nil)
			req = webtest.RequestWithSignedInUser(req, sc.signedInUser)
			// Exclude server construction and request setup from the measurement.
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				rec := httptest.NewRecorder()
				m.ServeHTTP(rec, req)
				require.Equal(b, 200, rec.Code)
				var resp []dtos.FolderSearchHit
				err := json.Unmarshal(rec.Body.Bytes(), &resp)
				require.NoError(b, err)
				// Sanity-check the result size so a regression in the result
				// set does not masquerade as a speedup.
				assert.Len(b, resp, bm.expectedLen)
			}
		})
	}
}
// setupDB seeds the test database with a three-level folder tree, dashboards,
// teams, team members and managed roles/permissions, and returns the
// resulting benchScenario used by the benchmarks.
//
// The data set (see the LEVELn_* constants):
//   - LEVEL0_FOLDER_NUM root folders, each with LEVEL0_DASHBOARD_NUM dashboards;
//   - each root folder has LEVEL1_FOLDER_NUM subfolders with LEVEL1_DASHBOARD_NUM dashboards;
//   - each of those has LEVEL2_FOLDER_NUM subfolders with LEVEL2_DASHBOARD_NUM dashboards.
//
// Rows are written with BulkInsert rather than through the services to keep
// setup time manageable.
func setupDB(b testing.TB) benchScenario {
	b.Helper()
	db := sqlstore.InitTestDB(b)
	// IDs records every randomly generated primary key so generateID never
	// hands out a duplicate.
	IDs := map[int64]struct{}{}
	opts := sqlstore.NativeSettingsForDialect(db.GetDialect())
	quotaService := quotatest.New(false, nil)
	cfg := setting.NewCfg()
	teamSvc := teamimpl.ProvideService(db, cfg)
	orgService, err := orgimpl.ProvideService(db, cfg, quotaService)
	require.NoError(b, err)
	cache := localcache.ProvideService()
	userSvc, err := userimpl.ProvideService(db, orgService, cfg, teamSvc, cache, &quotatest.FakeQuotaService{}, bundleregistry.ProvideService())
	require.NoError(b, err)
	// Swap in a permissive dashboard guardian for the duration of the
	// benchmark and restore the original constructor on cleanup.
	origNewGuardian := guardian.New
	guardian.MockDashboardGuardian(&guardian.FakeDashboardGuardian{CanSaveValue: true, CanViewValue: true})
	b.Cleanup(func() {
		guardian.New = origNewGuardian
	})
	var orgID int64 = 1
	userIDs := make([]int64, 0, TEAM_MEMBER_NUM)
	for i := 0; i < TEAM_MEMBER_NUM; i++ {
		u, err := userSvc.Create(context.Background(), &user.CreateUserCommand{
			OrgID: orgID,
			Login: fmt.Sprintf("user%d", i),
		})
		require.NoError(b, err)
		require.NotZero(b, u.ID)
		userIDs = append(userIDs, u.ID)
	}
	// The signed-in user gets explicit folder create/write permissions; read
	// access comes indirectly via the team memberships created below.
	signedInUser := user.SignedInUser{UserID: userIDs[0], OrgID: orgID, Permissions: map[int64]map[string][]string{
		orgID: {dashboards.ActionFoldersCreate: {}, dashboards.ActionFoldersWrite: {dashboards.ScopeFoldersAll}},
	}}
	now := time.Now()
	roles := make([]accesscontrol.Role, 0, TEAM_NUM)
	teams := make([]team.Team, 0, TEAM_NUM)
	teamMembers := make([]team.TeamMember, 0, TEAM_MEMBER_NUM)
	teamRoles := make([]accesscontrol.TeamRole, 0, TEAM_NUM)
	for i := 1; i < TEAM_NUM+1; i++ {
		teamID := int64(i)
		teams = append(teams, team.Team{
			UID:     fmt.Sprintf("team%d", i),
			ID:      teamID,
			Name:    fmt.Sprintf("team%d", i),
			OrgID:   orgID,
			Created: now,
			Updated: now,
		})
		signedInUser.Teams = append(signedInUser.Teams, teamID)
		for _, userID := range userIDs {
			teamMembers = append(teamMembers, team.TeamMember{
				UserID:     userID,
				TeamID:     teamID,
				OrgID:      orgID,
				Permission: dashboards.PERMISSION_VIEW,
				Created:    now,
				Updated:    now,
			})
		}
		// One managed role per team; folder read permissions are attached to
		// these roles further down.
		name := fmt.Sprintf("managed_team_role_%d", i)
		roles = append(roles, accesscontrol.Role{
			ID:      int64(i),
			UID:     name,
			OrgID:   orgID,
			Name:    name,
			Updated: now,
			Created: now,
		})
		teamRoles = append(teamRoles, accesscontrol.TeamRole{
			RoleID:  int64(i),
			OrgID:   orgID,
			TeamID:  teamID,
			Created: now,
		})
	}
	err = db.WithDbSession(context.Background(), func(sess *sqlstore.DBSession) error {
		_, err := sess.BulkInsert("team", teams, opts)
		require.NoError(b, err)
		_, err = sess.BulkInsert("team_member", teamMembers, opts)
		require.NoError(b, err)
		_, err = sess.BulkInsert("role", roles, opts)
		require.NoError(b, err)
		_, err = sess.BulkInsert("team_role", teamRoles, opts)
		return err
	})
	require.NoError(b, err)
	// foldersCap is the total number of folders across all three levels.
	foldersCap := LEVEL0_FOLDER_NUM + LEVEL0_FOLDER_NUM*LEVEL1_FOLDER_NUM + LEVEL0_FOLDER_NUM*LEVEL1_FOLDER_NUM*LEVEL2_FOLDER_NUM
	folders := make([]*f, 0, foldersCap)
	// dashsCap is the total number of non-folder dashboards across all three
	// levels, so that dashs and dashTags are allocated exactly once up front.
	dashsCap := LEVEL0_FOLDER_NUM*LEVEL0_DASHBOARD_NUM + LEVEL0_FOLDER_NUM*LEVEL1_FOLDER_NUM*LEVEL1_DASHBOARD_NUM + LEVEL0_FOLDER_NUM*LEVEL1_FOLDER_NUM*LEVEL2_FOLDER_NUM*LEVEL2_DASHBOARD_NUM
	// Every folder is also stored as a dashboard row (IsFolder), hence the
	// extra foldersCap capacity.
	dashs := make([]*dashboards.Dashboard, 0, foldersCap+dashsCap)
	dashTags := make([]*dashboardTag, 0, dashsCap)
	// Permissions are only created for level-0 folders (two per folder, one
	// for folders and one for dashboards), inherited by everything below.
	permissions := make([]accesscontrol.Permission, 0, foldersCap*2)
	for i := 0; i < LEVEL0_FOLDER_NUM; i++ {
		f0, d := addFolder(orgID, generateID(IDs), fmt.Sprintf("folder%d", i), nil)
		folders = append(folders, f0)
		dashs = append(dashs, d)
		// Spread folder read access over the managed team roles round-robin.
		roleID := int64(i%TEAM_NUM + 1)
		permissions = append(permissions, accesscontrol.Permission{
			RoleID:  roleID,
			Action:  dashboards.ActionFoldersRead,
			Scope:   dashboards.ScopeFoldersProvider.GetResourceScopeUID(f0.UID),
			Updated: now,
			Created: now,
		},
			accesscontrol.Permission{
				RoleID:  roleID,
				Action:  dashboards.ActionDashboardsRead,
				Scope:   dashboards.ScopeFoldersProvider.GetResourceScopeUID(f0.UID),
				Updated: now,
				Created: now,
			},
		)
		for j := 0; j < LEVEL0_DASHBOARD_NUM; j++ {
			str := fmt.Sprintf("dashboard_%d_%d", i, j)
			dashID := generateID(IDs)
			dashs = append(dashs, &dashboards.Dashboard{
				ID:       dashID,
				OrgID:    signedInUser.OrgID,
				IsFolder: false,
				UID:      str,
				FolderID: f0.ID,
				Slug:     str,
				Title:    str,
				Data:     simplejson.New(),
				Created:  now,
				Updated:  now,
			})
			dashTags = append(dashTags, &dashboardTag{
				DashboardId: dashID,
				Term:        fmt.Sprintf("tag%d", j),
			})
		}
		for j := 0; j < LEVEL1_FOLDER_NUM; j++ {
			f1, d1 := addFolder(orgID, generateID(IDs), fmt.Sprintf("folder%d_%d", i, j), &f0.UID)
			folders = append(folders, f1)
			dashs = append(dashs, d1)
			for k := 0; k < LEVEL1_DASHBOARD_NUM; k++ {
				str := fmt.Sprintf("dashboard_%d_%d_%d", i, j, k)
				dashID := generateID(IDs)
				dashs = append(dashs, &dashboards.Dashboard{
					ID:       dashID,
					OrgID:    signedInUser.OrgID,
					IsFolder: false,
					UID:      str,
					FolderID: f1.ID,
					Slug:     str,
					Title:    str,
					Data:     simplejson.New(),
					Created:  now,
					Updated:  now,
				})
				dashTags = append(dashTags, &dashboardTag{
					DashboardId: dashID,
					Term:        fmt.Sprintf("tag%d", k),
				})
			}
			for k := 0; k < LEVEL2_FOLDER_NUM; k++ {
				f2, d2 := addFolder(orgID, generateID(IDs), fmt.Sprintf("folder%d_%d_%d", i, j, k), &f1.UID)
				folders = append(folders, f2)
				dashs = append(dashs, d2)
				for l := 0; l < LEVEL2_DASHBOARD_NUM; l++ {
					str := fmt.Sprintf("dashboard_%d_%d_%d_%d", i, j, k, l)
					dashID := generateID(IDs)
					dashs = append(dashs, &dashboards.Dashboard{
						ID:       dashID,
						OrgID:    signedInUser.OrgID,
						IsFolder: false,
						UID:      str,
						FolderID: f2.ID,
						Slug:     str,
						Title:    str,
						Data:     simplejson.New(),
						Created:  now,
						Updated:  now,
					})
					dashTags = append(dashTags, &dashboardTag{
						DashboardId: dashID,
						Term:        fmt.Sprintf("tag%d", l),
					})
				}
			}
		}
	}
	err = db.WithDbSession(context.Background(), func(sess *sqlstore.DBSession) error {
		_, err := sess.BulkInsert("folder", folders, opts)
		require.NoError(b, err)
		_, err = sess.BulkInsert("dashboard", dashs, opts)
		require.NoError(b, err)
		_, err = sess.BulkInsert("permission", permissions, opts)
		require.NoError(b, err)
		_, err = sess.BulkInsert("dashboard_tag", dashTags, opts)
		return err
	})
	require.NoError(b, err)
	return benchScenario{
		db:           db,
		cfg:          cfg,
		signedInUser: &signedInUser,
		teamSvc:      teamSvc,
		userSvc:      userSvc,
	}
}
// setupServer wires a minimal web server exposing only the GET /api/folders
// and GET /api/search routes, backed by the scenario's database and the given
// feature flags. Each call builds a fresh service graph so different feature
// sets can be benchmarked against the same seeded data.
func setupServer(b testing.TB, sc benchScenario, features *featuremgmt.FeatureManager) *web.Macaron {
	b.Helper()
	m := web.New()
	// Middleware that injects a request context carrying the scenario's
	// signed-in user, standing in for the regular auth middleware stack.
	initCtx := &contextmodel.ReqContext{}
	m.Use(func(c *web.Context) {
		initCtx.Context = c
		initCtx.Logger = log.New("api-test")
		initCtx.SignedInUser = sc.signedInUser
		c.Req = c.Req.WithContext(ctxkey.Set(c.Req.Context(), initCtx))
	})
	// Fake license that reports access-control enforcement as enabled.
	license := licensingtest.NewFakeLicensing()
	license.On("FeatureEnabled", "accesscontrol.enforcement").Return(true).Maybe()
	acSvc := acimpl.ProvideOSSService(sc.cfg, acdb.ProvideService(sc.db), localcache.ProvideService(), features)
	quotaSrv := quotatest.New(false, nil)
	dashStore, err := database.ProvideDashboardStore(sc.db, sc.db.Cfg, features, tagimpl.ProvideService(sc.db, sc.db.Cfg), quotaSrv)
	require.NoError(b, err)
	folderStore := folderimpl.ProvideDashboardFolderStore(sc.db)
	ac := acimpl.ProvideAccessControl(sc.cfg)
	folderServiceWithFlagOn := folderimpl.ProvideService(ac, bus.ProvideBus(tracing.InitializeTracerForTest()), sc.cfg, dashStore, folderStore, sc.db, features)
	folderPermissions, err := ossaccesscontrol.ProvideFolderPermissions(
		sc.cfg, routing.NewRouteRegister(), sc.db, ac, license, &dashboards.FakeDashboardStore{}, folderServiceWithFlagOn, acSvc, sc.teamSvc, sc.userSvc)
	require.NoError(b, err)
	dashboardPermissions, err := ossaccesscontrol.ProvideDashboardPermissions(
		sc.cfg, routing.NewRouteRegister(), sc.db, ac, license, &dashboards.FakeDashboardStore{}, folderServiceWithFlagOn, acSvc, sc.teamSvc, sc.userSvc)
	require.NoError(b, err)
	dashboardSvc, err := dashboardservice.ProvideDashboardServiceImpl(
		sc.cfg, dashStore, folderStore, nil,
		features, folderPermissions, dashboardPermissions, ac,
		folderServiceWithFlagOn,
	)
	require.NoError(b, err)
	// Fake star service reporting no starred dashboards.
	starSvc := startest.NewStarServiceFake()
	starSvc.ExpectedUserStars = &star.GetUserStarsResult{UserStars: make(map[int64]bool)}
	hs := &HTTPServer{
		CacheService:  localcache.New(5*time.Minute, 10*time.Minute),
		Cfg:           sc.cfg,
		SQLStore:      sc.db,
		Features:      features,
		QuotaService:  quotaSrv,
		SearchService: search.ProvideService(sc.cfg, sc.db, starSvc, dashboardSvc),
		folderService: folderServiceWithFlagOn,
	}
	// Only the two endpoints exercised by the benchmarks are registered.
	m.Get("/api/folders", hs.GetFolders)
	m.Get("/api/search", hs.Search)
	return m
}
// f mirrors the folder table schema so folder rows can be bulk-inserted
// directly, bypassing the folder service (see TableName below).
type f struct {
	ID          int64   `xorm:"pk autoincr 'id'"`
	OrgID       int64   `xorm:"org_id"`
	UID         string  `xorm:"uid"`
	// ParentUID is nil for root folders.
	ParentUID   *string `xorm:"parent_uid"`
	Title       string
	Description string
	Created     time.Time
	Updated     time.Time
}
// TableName maps the f helper struct onto the folder table for xorm.
func (f *f) TableName() string {
	const folderTable = "folder"
	return folderTable
}
// dashboardTag is a SQL bean helper mirroring the dashboard_tag table,
// used to bulk-insert dashboard tags.
type dashboardTag struct {
	Id          int64
	DashboardId int64
	Term        string
}
// addFolder builds the pair of rows representing one folder: the folder-table
// row itself and the matching dashboard row (every folder is also stored as a
// dashboard with IsFolder set). The folder's title equals its uid.
func addFolder(orgID int64, id int64, uid string, parentUID *string) (*f, *dashboards.Dashboard) {
	createdAt := time.Now()
	folderRow := &f{
		ID:        id,
		OrgID:     orgID,
		UID:       uid,
		Title:     uid,
		ParentUID: parentUID,
		Created:   createdAt,
		Updated:   createdAt,
	}
	dashRow := &dashboards.Dashboard{
		ID:       id,
		OrgID:    orgID,
		UID:      uid,
		Title:    uid,
		Version:  1,
		IsFolder: true,
		Data: simplejson.NewFromAny(map[string]interface{}{
			"schemaVersion": 17, "title": uid, "uid": uid, "version": 1,
		}),
		Created: createdAt,
		Updated: createdAt,
	}
	return folderRow, dashRow
}
// generateID returns a random int64 below MAXIMUM_INT_POSTGRES that is not
// already present in reserved, recording it in reserved so subsequent calls
// never hand out the same ID twice.
func generateID(reserved map[int64]struct{}) int64 {
	for {
		candidate := rand.Int63n(MAXIMUM_INT_POSTGRES)
		if _, taken := reserved[candidate]; taken {
			continue
		}
		reserved[candidate] = struct{}{}
		return candidate
	}
}

View File

@ -44,6 +44,10 @@ load(
"scripts/drone/pipelines/lint_frontend.star",
"lint_frontend_pipeline",
)
load(
"scripts/drone/pipelines/benchmarks.star",
"integration_benchmarks",
)
ver_mode = "pr"
trigger = {
@ -133,6 +137,9 @@ def pr_pipelines():
),
docs_pipelines(ver_mode, trigger_docs_pr()),
shellcheck_pipeline(),
integration_benchmarks(
prefix = ver_mode,
),
]
def get_pr_trigger(include_paths = None, exclude_paths = None):

View File

@ -0,0 +1,83 @@
"""
This module returns the pipeline used for integration benchmarks.
"""
load(
"scripts/drone/steps/lib.star",
"compile_build_cmd",
"enterprise_setup_step",
"integration_benchmarks_step",
"verify_gen_cue_step",
"verify_gen_jsonnet_step",
"wire_install_step",
)
load(
"scripts/drone/services/services.star",
"integration_test_services",
"integration_test_services_volumes",
)
load(
"scripts/drone/utils/utils.star",
"pipeline",
)
def integration_benchmarks(prefix):
    """Generate a pipeline that runs Go integration benchmarks.

    The pipeline is triggered by promoting a build to the 'gobenchmarks'
    target and runs the benchmarks against sqlite, postgres and both
    supported MySQL versions.

    Args:
      prefix: used in the naming of the pipeline.

    Returns:
      Drone pipeline.
    """
    environment = {"EDITION": "oss"}
    services = integration_test_services()
    volumes = integration_test_services_volumes()

    # Attempt to clone grafana enterprise; isPromote skips the fork check,
    # since promote events are not associated with a pull request.
    init_steps = [enterprise_setup_step(isPromote = True)]

    verify_step = verify_gen_cue_step()
    verify_jsonnet_step = verify_gen_jsonnet_step()

    # Ensure that verify_gen_cue happens after we clone enterprise.
    # At the time of writing this, verify_gen_cue is depended on by the wire
    # step, which is what everything else depends on.
    verify_step["depends_on"].append("clone-enterprise")
    verify_jsonnet_step["depends_on"].append("clone-enterprise")

    init_steps += [
        compile_build_cmd(),
        verify_step,
        verify_jsonnet_step,
        wire_install_step(),
    ]
    # One benchmark step per database backend; the env vars select the backend
    # the Go tests run against.
    benchmark_steps = [
        integration_benchmarks_step("sqlite"),
        integration_benchmarks_step("postgres", {
            "PGPASSWORD": "grafanatest",
            "GRAFANA_TEST_DB": "postgres",
            "POSTGRES_HOST": "postgres",
        }),
        integration_benchmarks_step("mysql-5.7", {
            "GRAFANA_TEST_DB": "mysql",
            "MYSQL_HOST": "mysql57",
        }),
        integration_benchmarks_step("mysql-8.0", {
            "GRAFANA_TEST_DB": "mysql",
            "MYSQL_HOST": "mysql80",
        }),
    ]
    return pipeline(
        name = "{}-integration-benchmarks".format(prefix),
        edition = "oss",
        trigger = {
            "event": ["promote"],
            "target": ["gobenchmarks"],
        },
        environment = environment,
        services = services,
        volumes = volumes,
        steps = init_steps + benchmark_steps,
    )

View File

@ -88,8 +88,17 @@ def identify_runner_step(platform = "linux"):
],
}
def enterprise_setup_step(source = "${DRONE_SOURCE_BRANCH}", canFail = True):
step = clone_enterprise_step_pr(source = source, target = "${DRONE_TARGET_BRANCH}", canFail = canFail, location = "../grafana-enterprise")
def enterprise_setup_step(source = "${DRONE_SOURCE_BRANCH}", canFail = True, isPromote = False):
"""Setup the enterprise source into the ./grafana-enterprise directory.
Args:
source: controls which revision of grafana-enterprise is checked out, if it exists. The name 'source' derives from the 'source branch' of a pull request.
canFail: controls whether the step can fail. This is useful for pull requests where the enterprise source may not exist.
isPromote: controls whether or not this step is being used in a promote pipeline. If it is, then the clone enterprise step will not check if the pull request is a fork.
Returns:
Drone step.
"""
step = clone_enterprise_step_pr(source = source, target = "${DRONE_TARGET_BRANCH}", canFail = canFail, location = "../grafana-enterprise", isPromote = isPromote)
step["commands"] += [
"cd ../",
"ln -s src grafana",
@ -122,7 +131,7 @@ def clone_enterprise_step(source = "${DRONE_COMMIT}"):
return step
def clone_enterprise_step_pr(source = "${DRONE_COMMIT}", target = "main", canFail = False, location = "grafana-enterprise"):
def clone_enterprise_step_pr(source = "${DRONE_COMMIT}", target = "main", canFail = False, location = "grafana-enterprise", isPromote = False):
"""Clone the enterprise source into the ./grafana-enterprise directory.
Args:
@ -130,9 +139,19 @@ def clone_enterprise_step_pr(source = "${DRONE_COMMIT}", target = "main", canFai
target: controls which revision of grafana-enterprise is checked out, if it 'source' does not exist. The name 'target' derives from the 'target branch' of a pull request. If this does not exist, then 'main' will be checked out.
canFail: controls whether or not this step is allowed to fail. If it fails and this is true, then the pipeline will continue. canFail is used in pull request pipelines where enterprise may be cloned but may not clone in forks.
location: the path where grafana-enterprise is cloned.
isPromote: controls whether or not this step is being used in a promote pipeline. If it is, then the step will not check if the pull request is a fork.
Returns:
Drone step.
"""
if isPromote:
check = []
else:
check = [
'is_fork=$(curl "https://$GITHUB_TOKEN@api.github.com/repos/grafana/grafana/pulls/$DRONE_PULL_REQUEST" | jq .head.repo.fork)',
'if [ "$is_fork" != false ]; then return 1; fi', # Only clone if we're confident that 'fork' is 'false'. Fail if it's also empty.
]
step = {
"name": "clone-enterprise",
"image": images["build_image"],
@ -140,8 +159,7 @@ def clone_enterprise_step_pr(source = "${DRONE_COMMIT}", target = "main", canFai
"GITHUB_TOKEN": from_secret("github_token"),
},
"commands": [
'is_fork=$(curl "https://$GITHUB_TOKEN@api.github.com/repos/grafana/grafana/pulls/$DRONE_PULL_REQUEST" | jq .head.repo.fork)',
'if [ "$is_fork" != false ]; then return 1; fi', # Only clone if we're confident that 'fork' is 'false'. Fail if it's also empty.
] + check + [
'git clone "https://$${GITHUB_TOKEN}@github.com/grafana/grafana-enterprise.git" ' + location,
"cd {}".format(location),
'if git checkout {0}; then echo "checked out {0}"; elif git checkout {1}; then echo "git checkout {1}"; else git checkout main; fi'.format(source, target),
@ -1204,6 +1222,27 @@ def publish_images_step(edition, ver_mode, docker_repo, trigger = None):
return step
def integration_tests_step(name, cmds, environment = None):
    """Build a generic Drone step that runs integration tests after wire-install.

    Args:
      name: prefix for the step name; the step is named '<name>-integration-tests'.
      cmds: list of shell commands the step executes.
      environment: optional dict of environment variables for the step.

    Returns:
      Drone step.
    """
    step = {
        "name": "{}-integration-tests".format(name),
        "image": images["build_image"],
        "depends_on": ["wire-install"],
        "commands": cmds,
    }
    # Only attach 'environment' when it is non-empty, so steps without
    # variables omit the key entirely.
    if environment:
        step["environment"] = environment
    return step
def integration_benchmarks_step(name, environment = None):
    """Build a step that runs Go benchmarks for the packages in $GO_PACKAGES.

    Args:
      name: prefix for the step name (e.g. the database backend).
      environment: optional dict of environment variables for the step.

    Returns:
      Drone step.
    """
    benchmark_cmds = [
        # Fail fast when the promote did not provide GO_PACKAGES.
        "if [ -z ${GO_PACKAGES} ]; then echo 'missing GO_PACKAGES'; false; fi",
        # -run=^$ skips regular tests; each benchmark is run 8 times.
        "go test -v -run=^$ -benchmem -timeout=1h -count=8 -bench=. ${GO_PACKAGES}",
    ]
    return integration_tests_step(name + "-benchmark", benchmark_cmds, environment)
def postgres_integration_tests_step():
cmds = [
"apt-get update",
@ -1214,18 +1253,15 @@ def postgres_integration_tests_step():
"go clean -testcache",
"go test -p=1 -count=1 -covermode=atomic -timeout=5m -run '^TestIntegration' $(find ./pkg -type f -name '*_test.go' -exec grep -l '^func TestIntegration' '{}' '+' | grep -o '\\(.*\\)/' | sort -u)",
]
return {
"name": "postgres-integration-tests",
"image": images["build_image"],
"depends_on": ["wire-install"],
"environment": {
"PGPASSWORD": "grafanatest",
"GRAFANA_TEST_DB": "postgres",
"POSTGRES_HOST": "postgres",
},
"commands": cmds,
environment = {
"PGPASSWORD": "grafanatest",
"GRAFANA_TEST_DB": "postgres",
"POSTGRES_HOST": "postgres",
}
return integration_tests_step("postgres", cmds, environment)
def mysql_integration_tests_step(hostname, version):
cmds = [
"apt-get update",
@ -1235,47 +1271,40 @@ def mysql_integration_tests_step(hostname, version):
"go clean -testcache",
"go test -p=1 -count=1 -covermode=atomic -timeout=5m -run '^TestIntegration' $(find ./pkg -type f -name '*_test.go' -exec grep -l '^func TestIntegration' '{}' '+' | grep -o '\\(.*\\)/' | sort -u)",
]
return {
"name": "mysql-{}-integration-tests".format(version),
"image": images["build_image"],
"depends_on": ["wire-install"],
"environment": {
"GRAFANA_TEST_DB": "mysql",
"MYSQL_HOST": hostname,
},
"commands": cmds,
environment = {
"GRAFANA_TEST_DB": "mysql",
"MYSQL_HOST": hostname,
}
return integration_tests_step("mysql-{}".format(version), cmds, environment)
def redis_integration_tests_step():
return {
"name": "redis-integration-tests",
"image": images["build_image"],
"depends_on": ["wire-install"],
"environment": {
"REDIS_URL": "redis://redis:6379/0",
},
"commands": [
"dockerize -wait tcp://redis:6379/0 -timeout 120s",
"go clean -testcache",
"go test -run IntegrationRedis -covermode=atomic -timeout=2m ./pkg/...",
],
cmds = [
"dockerize -wait tcp://redis:6379/0 -timeout 120s",
"go clean -testcache",
"go test -run IntegrationRedis -covermode=atomic -timeout=2m ./pkg/...",
]
environment = {
"REDIS_URL": "redis://redis:6379/0",
}
return integration_tests_step("redis", cmds, environment)
def memcached_integration_tests_step():
return {
"name": "memcached-integration-tests",
"image": images["build_image"],
"depends_on": ["wire-install"],
"environment": {
"MEMCACHED_HOSTS": "memcached:11211",
},
"commands": [
"dockerize -wait tcp://memcached:11211 -timeout 120s",
"go clean -testcache",
"go test -run IntegrationMemcached -covermode=atomic -timeout=2m ./pkg/...",
],
cmds = [
"dockerize -wait tcp://memcached:11211 -timeout 120s",
"go clean -testcache",
"go test -run IntegrationMemcached -covermode=atomic -timeout=2m ./pkg/...",
]
environment = {
"MEMCACHED_HOSTS": "memcached:11211",
}
return integration_tests_step("memcached", cmds, environment)
def release_canary_npm_packages_step(trigger = None):
"""Releases canary NPM packages.