CloudMigrations: Break snapshot resources out into their own table (#89575)
* create a new table for migration resources
* remove raw result bytes from db
* more snapshot resource management stuff
* integrate new table with snapshots
* pass in result limit and offset as params
* combine create and update
* set up xorm store test
* add unit tests
* save some cpu
* remove unneeded arg
* regen swagger
* fix bug with result processing
* fix update create logic so that uid isn't required for lookup
* change offset to page
* regen swagger
* revert accidental changes to file
* curl command page should be 1 indexed
This commit is contained in:
parent dfee2720cc
commit 4d69213829
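For orientation before the diff: a minimal sketch of the call pattern this change introduces, with snapshot resources read back as paginated rows instead of a raw result blob. The type, field names, and defaults are taken from the diff below; the import path and the service wiring around it are assumptions.

```go
package example

import (
	"context"
	"fmt"

	"github.com/grafana/grafana/pkg/services/cloudmigration"
)

// printSnapshotResources sketches how a caller fetches a snapshot after this
// change: one query struct with 1-indexed result pagination replaces the old
// (sessionUid, snapshotUid) argument pair.
func printSnapshotResources(ctx context.Context, svc cloudmigration.Service, sessUid, snapshotUid string) error {
	snapshot, err := svc.GetSnapshot(ctx, cloudmigration.GetSnapshotsQuery{
		SnapshotUID: snapshotUid,
		SessionUID:  sessUid,
		ResultPage:  1,   // pages are 1-indexed; the handler coerces values < 1 to 1
		ResultLimit: 100, // handler default when the query param is omitted
	})
	if err != nil {
		return err
	}
	// Resources now come from the new cloud_migration_resource table.
	for _, r := range snapshot.Resources {
		fmt.Println(r.Type, r.RefID, r.Status, r.Error)
	}
	return nil
}
```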
@@ -412,23 +412,32 @@ func (cma *CloudMigrationAPI) GetSnapshot(c *contextmodel.ReqContext) response.R
        return response.ErrOrFallback(http.StatusBadRequest, "invalid snapshot uid", err)
    }

    snapshot, err := cma.cloudMigrationService.GetSnapshot(ctx, sessUid, snapshotUid)
    q := cloudmigration.GetSnapshotsQuery{
        SnapshotUID: snapshotUid,
        SessionUID:  sessUid,
        ResultPage:  c.QueryInt("resultPage"),
        ResultLimit: c.QueryInt("resultLimit"),
    }
    if q.ResultLimit == 0 {
        q.ResultLimit = 100
    }
    if q.ResultPage < 1 {
        q.ResultPage = 1
    }
    snapshot, err := cma.cloudMigrationService.GetSnapshot(ctx, q)
    if err != nil {
        return response.ErrOrFallback(http.StatusInternalServerError, "error retrieving snapshot", err)
    }

    result, err := snapshot.GetSnapshotResult()
    if err != nil {
        return response.ErrOrFallback(http.StatusInternalServerError, "error snapshot reading snapshot results", err)
    }
    results := snapshot.Resources

    dtoResults := make([]MigrateDataResponseItemDTO, len(result))
    for i := 0; i < len(result); i++ {
    dtoResults := make([]MigrateDataResponseItemDTO, len(results))
    for i := 0; i < len(results); i++ {
        dtoResults[i] = MigrateDataResponseItemDTO{
            Type:   MigrateDataType(result[i].Type),
            RefID:  result[i].RefID,
            Status: ItemStatus(result[i].Status),
            Error:  result[i].Error,
            Type:   MigrateDataType(results[i].Type),
            RefID:  results[i].RefID,
            Status: ItemStatus(results[i].Status),
            Error:  results[i].Error,
        }
    }

@@ -467,11 +476,14 @@ func (cma *CloudMigrationAPI) GetSnapshotList(c *contextmodel.ReqContext) respon
    q := cloudmigration.ListSnapshotsQuery{
        SessionUID: uid,
        Limit:      c.QueryInt("limit"),
        Offset:     c.QueryInt("offset"),
        Page:       c.QueryInt("page"),
    }
    if q.Limit == 0 {
        q.Limit = 100
    }
    if q.Page < 1 {
        q.Page = 1
    }

    snapshotList, err := cma.cloudMigrationService.GetSnapshotList(ctx, q)
    if err != nil {
@@ -11,10 +11,10 @@ curl -X POST -H "Content-Type: application/json" \
  http://admin:admin@localhost:3000/api/cloudmigration/migration/{sessionUid}/snapshot

[get snapshot list]
curl -X GET http://admin:admin@localhost:3000/api/cloudmigration/migration/{sessionUid}/snapshots?limit=100&offset=0
curl -X GET http://admin:admin@localhost:3000/api/cloudmigration/migration/{sessionUid}/snapshots?limit=100&page=1

[get snapshot]
curl -X GET http://admin:admin@localhost:3000/api/cloudmigration/migration/{sessionUid}/snapshot/{snapshotUid}
curl -X GET http://admin:admin@localhost:3000/api/cloudmigration/migration/{sessionUid}/snapshot/{snapshotUid}?resultLimit=100&resultPage=1

[upload snapshot]
curl -X POST -H "Content-Type: application/json" \
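Note that both `page` and `resultPage` in the curl examples above are 1-indexed ("curl command page should be 1 indexed" in the commit message). As an illustration only, a small standalone sketch of the page-to-offset arithmetic the store applies; the helper name is mine, not part of the diff:

```go
package example

// offsetFor mirrors the expression used in the store code further down
// (offset := (page - 1) * limit): page 1 with limit 100 starts at offset 0,
// page 2 at offset 100, and so on.
func offsetFor(page, limit int) int {
	if page < 1 {
		page = 1 // the API layer also treats anything below 1 as page 1
	}
	return (page - 1) * limit
}
```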
@@ -128,8 +128,10 @@ const (
type ItemStatus string

const (
    ItemStatusOK ItemStatus = "OK"
    ItemStatusError ItemStatus = "ERROR"
    ItemStatusOK ItemStatus = "OK"
    ItemStatusError ItemStatus = "ERROR"
    ItemStatusPending ItemStatus = "PENDING"
    ItemStatusUnknown ItemStatus = "UNKNOWN"
)

// swagger:parameters getCloudMigrationRun

@@ -268,6 +270,18 @@ type CreateSnapshotResponseDTO struct {

// swagger:parameters getSnapshot
type GetSnapshotParams struct {
    // ResultPage is used for pagination with ResultLimit
    // in:query
    // required:false
    // default: 1
    ResultPage int `json:"resultPage"`

    // Max limit for snapshot results returned.
    // in:query
    // required:false
    // default: 100
    ResultLimit int `json:"resultLimit"`

    // Session UID of a session
    // in: path
    UID string `json:"uid"`

@@ -290,16 +304,18 @@ type GetSnapshotResponseDTO struct {

// swagger:parameters getShapshotList
type GetSnapshotListParams struct {
    // Offset is used for pagination with limit
    // Page is used for pagination with limit
    // in:query
    // required:false
    // default: 0
    Offset int `json:"offset"`
    // default: 1
    Page int `json:"page"`

    // Max limit for results returned.
    // in:query
    // required:false
    // default: 100
    Limit int `json:"limit"`

    // Session UID of a session
    // in: path
    UID string `json:"uid"`
@@ -25,7 +25,7 @@ type Service interface {
    GetMigrationRunList(ctx context.Context, migUID string) (*CloudMigrationRunList, error)

    CreateSnapshot(ctx context.Context, sessionUid string) (*CloudMigrationSnapshot, error)
    GetSnapshot(ctx context.Context, sessionUid string, snapshotUid string) (*CloudMigrationSnapshot, error)
    GetSnapshot(ctx context.Context, query GetSnapshotsQuery) (*CloudMigrationSnapshot, error)
    GetSnapshotList(ctx context.Context, query ListSnapshotsQuery) ([]CloudMigrationSnapshot, error)
    UploadSnapshot(ctx context.Context, sessionUid string, snapshotUid string) error
    CancelSnapshot(ctx context.Context, sessionUid string, snapshotUid string) error
@@ -404,16 +404,10 @@ func (s *Service) RunMigration(ctx context.Context, uid string) (*cloudmigration
        return nil, fmt.Errorf("migrate data error: %w", err)
    }

    respData, err := json.Marshal(resp)
    if err != nil {
        s.log.Error("error marshalling migration response data: %w", err)
        return nil, fmt.Errorf("marshalling migration response data: %w", err)
    }

    // save the result of the migration
    runUID, err := s.createMigrationRun(ctx, cloudmigration.CloudMigrationSnapshot{
        SessionUID: migration.UID,
        Result:     respData,
        Resources:  resp.Items,
    })
    if err != nil {
        response.Error(http.StatusInternalServerError, "migration run save error", err)

@@ -513,11 +507,12 @@ func (s *Service) CreateSnapshot(ctx context.Context, sessionUid string) (*cloud
}

// GetSnapshot returns the on-prem version of a snapshot, supplemented with processing status from GMS
func (s *Service) GetSnapshot(ctx context.Context, sessionUid string, snapshotUid string) (*cloudmigration.CloudMigrationSnapshot, error) {
func (s *Service) GetSnapshot(ctx context.Context, query cloudmigration.GetSnapshotsQuery) (*cloudmigration.CloudMigrationSnapshot, error) {
    ctx, span := s.tracer.Start(ctx, "CloudMigrationService.GetSnapshot")
    defer span.End()

    snapshot, err := s.store.GetSnapshotByUID(ctx, snapshotUid)
    sessionUid, snapshotUid := query.SessionUID, query.SnapshotUID
    snapshot, err := s.store.GetSnapshotByUID(ctx, snapshotUid, query.ResultPage, query.ResultLimit)
    if err != nil {
        return nil, fmt.Errorf("fetching snapshot for uid %s: %w", snapshotUid, err)
    }

@@ -534,16 +529,12 @@ func (s *Service) GetSnapshot(ctx context.Context, sessionUid string, snapshotUi
        return nil, fmt.Errorf("error fetching snapshot status from GMS: sessionUid: %s, snapshotUid: %s", sessionUid, snapshotUid)
    }

    // grab any result available
    // TODO: figure out a more intelligent way to do this, will depend on GMS apis
    snapshot.Result = snapshotMeta.Result

    if snapshotMeta.Status == cloudmigration.SnapshotStatusFinished {
        // we need to update the snapshot in our db before reporting anything finished to the client
        if err := s.store.UpdateSnapshot(ctx, cloudmigration.UpdateSnapshotCmd{
            UID:    snapshot.UID,
            Status: cloudmigration.SnapshotStatusFinished,
            Result: snapshot.Result,
            UID:       snapshot.UID,
            Status:    cloudmigration.SnapshotStatusFinished,
            Resources: snapshotMeta.Resources,
        }); err != nil {
            return nil, fmt.Errorf("error updating snapshot status: %w", err)
        }

@@ -568,7 +559,10 @@ func (s *Service) UploadSnapshot(ctx context.Context, sessionUid string, snapsho
    ctx, span := s.tracer.Start(ctx, "CloudMigrationService.UploadSnapshot")
    defer span.End()

    snapshot, err := s.GetSnapshot(ctx, sessionUid, snapshotUid)
    snapshot, err := s.GetSnapshot(ctx, cloudmigration.GetSnapshotsQuery{
        SnapshotUID: snapshotUid,
        SessionUID:  sessionUid,
    })
    if err != nil {
        return fmt.Errorf("fetching snapshot with uid %s: %w", snapshotUid, err)
    }
@@ -64,7 +64,7 @@ func (s *NoopServiceImpl) CreateSnapshot(ctx context.Context, sessionUid string)
    return nil, cloudmigration.ErrFeatureDisabledError
}

func (s *NoopServiceImpl) GetSnapshot(ctx context.Context, sessionUid string, snapshotUid string) (*cloudmigration.CloudMigrationSnapshot, error) {
func (s *NoopServiceImpl) GetSnapshot(ctx context.Context, query cloudmigration.GetSnapshotsQuery) (*cloudmigration.CloudMigrationSnapshot, error) {
    return nil, cloudmigration.ErrFeatureDisabledError
}

@@ -145,18 +145,28 @@ func Test_ExecuteAsyncWorkflow(t *testing.T) {
        require.Equal(t, sessionUid, snapshotResp.SessionUID)
        snapshotUid := snapshotResp.UID

        snapshot, err := s.GetSnapshot(ctxWithSignedInUser(), sessionUid, snapshotUid)
        // Service doesn't currently expose updating a snapshot externally, so we will just manually add a resource
        err = (s.(*Service)).store.CreateUpdateSnapshotResources(context.Background(), snapshotUid, []cloudmigration.CloudMigrationResource{{Type: cloudmigration.DashboardDataType, RefID: "qwerty", Status: cloudmigration.ItemStatusOK}})
        assert.NoError(t, err)

        snapshot, err := s.GetSnapshot(ctxWithSignedInUser(), cloudmigration.GetSnapshotsQuery{
            SnapshotUID: snapshotUid,
            SessionUID:  sessionUid,
            ResultPage:  1,
            ResultLimit: 100,
        })
        require.NoError(t, err)
        assert.Equal(t, snapshotResp.UID, snapshot.UID)
        assert.Equal(t, snapshotResp.EncryptionKey, snapshot.EncryptionKey)
        assert.Empty(t, snapshot.Result) // will change once we create a new table for migration items
        assert.Len(t, snapshot.Resources, 1)
        assert.Equal(t, "qwerty", snapshot.Resources[0].RefID)

        snapshots, err := s.GetSnapshotList(ctxWithSignedInUser(), cloudmigration.ListSnapshotsQuery{SessionUID: sessionUid, Limit: 100})
        snapshots, err := s.GetSnapshotList(ctxWithSignedInUser(), cloudmigration.ListSnapshotsQuery{SessionUID: sessionUid, Page: 1, Limit: 100})
        require.NoError(t, err)
        assert.Len(t, snapshots, 1)
        assert.Equal(t, snapshotResp.UID, snapshots[0].UID)
        assert.Equal(t, snapshotResp.EncryptionKey, snapshots[0].EncryptionKey)
        assert.Empty(t, snapshots[0].Result) // should remain this way even after we create a new table
        assert.Empty(t, snapshots[0].Resources)

        err = s.UploadSnapshot(ctxWithSignedInUser(), sessionUid, snapshotUid)
        require.NoError(t, err)
@@ -2,7 +2,6 @@ package fake

import (
    "context"
    "encoding/json"
    "fmt"
    "time"

@@ -93,7 +92,7 @@ func (m FakeServiceImpl) RunMigration(_ context.Context, _ string) (*cloudmigrat
func fakeMigrateDataResponseDTO() cloudmigration.MigrateDataResponse {
    return cloudmigration.MigrateDataResponse{
        RunUID: "fake_uid",
        Items: []cloudmigration.MigrateDataResponseItem{
        Items: []cloudmigration.CloudMigrationResource{
            {Type: "type", RefID: "make_refid", Status: "ok", Error: "none"},
        },
    }

@@ -107,15 +106,11 @@ func (m FakeServiceImpl) GetMigrationStatus(_ context.Context, _ string) (*cloud
    if m.ReturnError {
        return nil, fmt.Errorf("mock error")
    }
    result, err := json.Marshal(fakeMigrateDataResponseDTO())
    if err != nil {
        return nil, err
    }
    return &cloudmigration.CloudMigrationSnapshot{
        ID:         0,
        UID:        "fake_uid",
        SessionUID: "fake_mig_uid",
        Result:     result,
        Resources:  fakeMigrateDataResponseDTO().Items,
        Created:    fixedDate,
        Updated:    fixedDate,
        Finished:   fixedDate,

@@ -145,7 +140,7 @@ func (m FakeServiceImpl) CreateSnapshot(ctx context.Context, sessionUid string)
    }, nil
}

func (m FakeServiceImpl) GetSnapshot(ctx context.Context, sessionUid string, snapshotUid string) (*cloudmigration.CloudMigrationSnapshot, error) {
func (m FakeServiceImpl) GetSnapshot(ctx context.Context, query cloudmigration.GetSnapshotsQuery) (*cloudmigration.CloudMigrationSnapshot, error) {
    if m.ReturnError {
        return nil, fmt.Errorf("mock error")
    }
@@ -18,6 +18,10 @@ type store interface {

    CreateSnapshot(ctx context.Context, snapshot cloudmigration.CloudMigrationSnapshot) (string, error)
    UpdateSnapshot(ctx context.Context, snapshot cloudmigration.UpdateSnapshotCmd) error
    GetSnapshotByUID(ctx context.Context, uid string) (*cloudmigration.CloudMigrationSnapshot, error)
    GetSnapshotByUID(ctx context.Context, uid string, resultPage int, resultLimit int) (*cloudmigration.CloudMigrationSnapshot, error)
    GetSnapshotList(ctx context.Context, query cloudmigration.ListSnapshotsQuery) ([]cloudmigration.CloudMigrationSnapshot, error)

    CreateUpdateSnapshotResources(ctx context.Context, snapshotUid string, resources []cloudmigration.CloudMigrationResource) error
    GetSnapshotResources(ctx context.Context, snapshotUid string, page int, limit int) ([]cloudmigration.CloudMigrationResource, error)
    DeleteSnapshotResources(ctx context.Context, snapshotUid string) error
}
@@ -150,9 +150,7 @@ func (ss *sqlStore) CreateSnapshot(ctx context.Context, snapshot cloudmigration.
    if err := ss.encryptKey(ctx, &snapshot); err != nil {
        return "", err
    }
    if snapshot.Result == nil {
        snapshot.Result = make([]byte, 0)
    }

    if snapshot.UID == "" {
        snapshot.UID = util.GenerateShortUID()
    }

@@ -181,38 +179,31 @@ func (ss *sqlStore) UpdateSnapshot(ctx context.Context, update cloudmigration.Up
    }
    err := ss.db.InTransaction(ctx, func(ctx context.Context) error {
        // Update status if set
        if err := ss.db.WithDbSession(ctx, func(sess *sqlstore.DBSession) error {
            if update.Status != "" {
        if update.Status != "" {
            if err := ss.db.WithDbSession(ctx, func(sess *sqlstore.DBSession) error {
                rawSQL := "UPDATE cloud_migration_snapshot SET status=? WHERE uid=?"
                if _, err := sess.Exec(rawSQL, update.Status, update.UID); err != nil {
                    return fmt.Errorf("updating snapshot status for uid %s: %w", update.UID, err)
                }
                return nil
            }); err != nil {
                return err
            }
            return nil
        }); err != nil {
            return err
        }

        // Update result if set
        if err := ss.db.WithDbSession(ctx, func(sess *sqlstore.DBSession) error {
            if len(update.Result) > 0 {
                rawSQL := "UPDATE cloud_migration_snapshot SET result=? WHERE uid=?"
                if _, err := sess.Exec(rawSQL, update.Result, update.UID); err != nil {
                    return fmt.Errorf("updating snapshot result for uid %s: %w", update.UID, err)
                }
        // Update resources if set
        if len(update.Resources) > 0 {
            if err := ss.CreateUpdateSnapshotResources(ctx, update.UID, update.Resources); err != nil {
                return err
            }
            return nil
        }); err != nil {
            return err
        }

        return nil
    })

    return err
}

func (ss *sqlStore) GetSnapshotByUID(ctx context.Context, uid string) (*cloudmigration.CloudMigrationSnapshot, error) {
func (ss *sqlStore) GetSnapshotByUID(ctx context.Context, uid string, resultPage int, resultLimit int) (*cloudmigration.CloudMigrationSnapshot, error) {
    var snapshot cloudmigration.CloudMigrationSnapshot
    err := ss.db.WithDbSession(ctx, func(sess *db.Session) error {
        exist, err := sess.Where("uid=?", uid).Get(&snapshot)

@@ -224,18 +215,28 @@ func (ss *sqlStore) GetSnapshotByUID(ctx context.Context, uid string) (*cloudmig
        }
        return nil
    })
    if err != nil {
        return nil, err
    }

    if err := ss.decryptKey(ctx, &snapshot); err != nil {
        return &snapshot, err
    }

    resources, err := ss.GetSnapshotResources(ctx, uid, resultPage, resultLimit)
    if err == nil {
        snapshot.Resources = resources
    }

    return &snapshot, err
}

// GetSnapshotList returns snapshots without resources included. Use GetSnapshotByUID to get individual snapshot results.
func (ss *sqlStore) GetSnapshotList(ctx context.Context, query cloudmigration.ListSnapshotsQuery) ([]cloudmigration.CloudMigrationSnapshot, error) {
    var snapshots = make([]cloudmigration.CloudMigrationSnapshot, 0)
    err := ss.db.WithDbSession(ctx, func(sess *db.Session) error {
        sess.Limit(query.Limit, query.Offset)
        offset := (query.Page - 1) * query.Limit
        sess.Limit(query.Limit, offset)
        return sess.Find(&snapshots, &cloudmigration.CloudMigrationSnapshot{
            SessionUID: query.SessionUID,
        })

@@ -252,6 +253,70 @@ func (ss *sqlStore) GetSnapshotList(ctx context.Context, query cloudmigration.Li
    return snapshots, nil
}

func (ss *sqlStore) CreateUpdateSnapshotResources(ctx context.Context, snapshotUid string, resources []cloudmigration.CloudMigrationResource) error {
    // ensure snapshot_uids are consistent so that we can use them to query when uid isn't known
    for i := 0; i < len(resources); i++ {
        resources[i].SnapshotUID = snapshotUid
    }

    return ss.db.InTransaction(ctx, func(ctx context.Context) error {
        sql := "UPDATE cloud_migration_resource SET status=?, error_string=? WHERE uid=? OR (snapshot_uid=? AND resource_uid=?)"
        err := ss.db.WithDbSession(ctx, func(sess *sqlstore.DBSession) error {
            for _, r := range resources {
                // try an update first
                result, err := sess.Exec(sql, r.Status, r.Error, r.UID, snapshotUid, r.RefID)
                if err != nil {
                    return err
                }
                // if this had no effect, assign a uid and insert instead
                n, err := result.RowsAffected()
                if err != nil {
                    return err
                } else if n == 0 {
                    r.UID = util.GenerateShortUID()
                    _, err := sess.Insert(r)
                    if err != nil {
                        return err
                    }
                }
            }
            return nil
        })
        if err != nil {
            return fmt.Errorf("updating resources: %w", err)
        }

        return nil
    })
}

func (ss *sqlStore) GetSnapshotResources(ctx context.Context, snapshotUid string, page int, limit int) ([]cloudmigration.CloudMigrationResource, error) {
    var resources []cloudmigration.CloudMigrationResource
    if limit == 0 {
        return resources, nil
    }
    err := ss.db.WithDbSession(ctx, func(sess *db.Session) error {
        offset := (page - 1) * limit
        sess.Limit(limit, offset)
        return sess.Find(&resources, &cloudmigration.CloudMigrationResource{
            SnapshotUID: snapshotUid,
        })
    })
    if err != nil {
        return nil, err
    }
    return resources, nil
}

func (ss *sqlStore) DeleteSnapshotResources(ctx context.Context, snapshotUid string) error {
    return ss.db.WithDbSession(ctx, func(sess *sqlstore.DBSession) error {
        _, err := sess.Delete(cloudmigration.CloudMigrationResource{
            SnapshotUID: snapshotUid,
        })
        return err
    })
}

func (ss *sqlStore) encryptToken(ctx context.Context, cm *cloudmigration.CloudMigrationSession) error {
    s, err := ss.secretsService.Encrypt(ctx, []byte(cm.AuthToken), secrets.WithoutScope())
    if err != nil {
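One behavior in the store above worth spelling out: `CreateUpdateSnapshotResources` is an upsert keyed on `uid` OR `(snapshot_uid, resource_uid)`, which is what lets callers update a resource without knowing its generated UID ("fix update create logic so that uid isn't required for lookup" in the commit message). A hedged usage sketch; the helper, its interface, and the import path are illustrative assumptions, not part of the diff:

```go
package example

import (
	"context"

	"github.com/grafana/grafana/pkg/services/cloudmigration"
)

// resourceStore captures just the store method this sketch relies on.
type resourceStore interface {
	CreateUpdateSnapshotResources(ctx context.Context, snapshotUid string, resources []cloudmigration.CloudMigrationResource) error
}

// markResourceFailed flips one resource to ERROR, identified only by the
// snapshot UID and its RefID; the upsert SQL matches on snapshot_uid plus
// resource_uid when the row UID is unknown.
func markResourceFailed(ctx context.Context, store resourceStore, snapshotUid, refID, msg string) error {
	return store.CreateUpdateSnapshotResources(ctx, snapshotUid, []cloudmigration.CloudMigrationResource{
		{RefID: refID, Status: cloudmigration.ItemStatusError, Error: msg},
	})
}
```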
@@ -12,6 +12,7 @@ import (
    "github.com/grafana/grafana/pkg/services/sqlstore"
    "github.com/grafana/grafana/pkg/tests/testsuite"
    "github.com/grafana/grafana/pkg/util"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

@@ -111,10 +112,9 @@ func Test_CreateMigrationRun(t *testing.T) {
    ctx := context.Background()

    t.Run("creates a session run and retrieves it from db", func(t *testing.T) {
        result := []byte("OK")
        cmr := cloudmigration.CloudMigrationSnapshot{
            SessionUID: "asdfg",
            Result:     result,
            Status:     cloudmigration.SnapshotStatusFinished,
        }

        createResp, err := s.CreateMigrationRun(ctx, cmr)

@@ -123,7 +123,7 @@ func Test_CreateMigrationRun(t *testing.T) {

        getMRResp, err := s.GetMigrationStatus(ctx, createResp)
        require.NoError(t, err)
        require.Equal(t, result, getMRResp.Result)
        require.Equal(t, cmr.Status, getMRResp.Status)
    })
}

@@ -181,7 +181,7 @@ func Test_SnapshotManagement(t *testing.T) {
        require.NotEmpty(t, snapshotUid)

        //retrieve it from the db
        snapshot, err := s.GetSnapshotByUID(ctx, snapshotUid)
        snapshot, err := s.GetSnapshotByUID(ctx, snapshotUid, 0, 0)
        require.NoError(t, err)
        require.Equal(t, cloudmigration.SnapshotStatusInitializing, string(snapshot.Status))

@@ -190,18 +190,71 @@ func Test_SnapshotManagement(t *testing.T) {
        require.NoError(t, err)

        //retrieve it again
        snapshot, err = s.GetSnapshotByUID(ctx, snapshotUid)
        snapshot, err = s.GetSnapshotByUID(ctx, snapshotUid, 0, 0)
        require.NoError(t, err)
        require.Equal(t, cloudmigration.SnapshotStatusCreating, string(snapshot.Status))

        // lists snapshots and ensures it's in there
        snapshots, err := s.GetSnapshotList(ctx, cloudmigration.ListSnapshotsQuery{SessionUID: sessionUid, Offset: 0, Limit: 100})
        snapshots, err := s.GetSnapshotList(ctx, cloudmigration.ListSnapshotsQuery{SessionUID: sessionUid, Page: 1, Limit: 100})
        require.NoError(t, err)
        require.Len(t, snapshots, 1)
        require.Equal(t, *snapshot, snapshots[0])
    })
}

func Test_SnapshotResources(t *testing.T) {
    _, s := setUpTest(t)
    ctx := context.Background()

    t.Run("tests CRUD of snapshot resources", func(t *testing.T) {
        // Get the default rows from the test
        resources, err := s.GetSnapshotResources(ctx, "poiuy", 0, 100)
        assert.NoError(t, err)
        assert.Len(t, resources, 3)

        // create a new resource and update an existing resource
        err = s.CreateUpdateSnapshotResources(ctx, "poiuy", []cloudmigration.CloudMigrationResource{
            {
                Type:   cloudmigration.DatasourceDataType,
                RefID:  "mi39fj",
                Status: cloudmigration.ItemStatusOK,
            },
            {
                UID:    "qwerty",
                Status: cloudmigration.ItemStatusOK,
            },
        })
        assert.NoError(t, err)

        // Get resources again
        resources, err = s.GetSnapshotResources(ctx, "poiuy", 0, 100)
        assert.NoError(t, err)
        assert.Len(t, resources, 4)
        // ensure existing resource was updated
        for _, r := range resources {
            if r.UID == "querty" {
                assert.Equal(t, cloudmigration.ItemStatusOK, r.Status)
                break
            }
        }
        // ensure a new one was made
        for _, r := range resources {
            if r.UID == "mi39fj" {
                assert.Equal(t, cloudmigration.ItemStatusOK, r.Status)
                break
            }
        }

        // delete snapshot resources
        err = s.DeleteSnapshotResources(ctx, "poiuy")
        assert.NoError(t, err)
        // make sure they're gone
        resources, err = s.GetSnapshotResources(ctx, "poiuy", 0, 100)
        assert.NoError(t, err)
        assert.Len(t, resources, 0)
    })
}

func setUpTest(t *testing.T) (*sqlstore.SQLStore, *sqlStore) {
    testDB := db.InitTestDB(t)
    s := &sqlStore{

@@ -212,12 +265,12 @@ func setUpTest(t *testing.T) (*sqlstore.SQLStore, *sqlStore) {

    // insert cloud migration test data
    _, err := testDB.GetSqlxSession().Exec(ctx, `
        INSERT INTO
            cloud_migration_session (id, uid, auth_token, slug, stack_id, region_slug, cluster_slug, created, updated)
        VALUES
            (1,'qwerty', ?, '11111', 11111, 'test', 'test', '2024-03-25 15:30:36.000', '2024-03-27 15:30:43.000'),
            (2,'asdfgh', ?, '22222', 22222, 'test', 'test', '2024-03-25 15:30:36.000', '2024-03-27 15:30:43.000'),
            (3,'zxcvbn', ?, '33333', 33333, 'test', 'test', '2024-03-25 15:30:36.000', '2024-03-27 15:30:43.000');
        INSERT INTO
            cloud_migration_session (id, uid, auth_token, slug, stack_id, region_slug, cluster_slug, created, updated)
        VALUES
            (1,'qwerty', ?, '11111', 11111, 'test', 'test', '2024-03-25 15:30:36.000', '2024-03-27 15:30:43.000'),
            (2,'asdfgh', ?, '22222', 22222, 'test', 'test', '2024-03-25 15:30:36.000', '2024-03-27 15:30:43.000'),
            (3,'zxcvbn', ?, '33333', 33333, 'test', 'test', '2024-03-25 15:30:36.000', '2024-03-27 15:30:43.000');
    `,
        encodeToken("12345"),
        encodeToken("6789"),

@@ -227,16 +280,25 @@ func setUpTest(t *testing.T) (*sqlstore.SQLStore, *sqlStore) {

    // insert cloud migration run test data
    _, err = testDB.GetSqlxSession().Exec(ctx, `
        INSERT INTO
            cloud_migration_snapshot (session_uid, uid, result, created, updated, finished, status)
        VALUES
            ('qwerty', 'poiuy', ?, '2024-03-25 15:30:36.000', '2024-03-27 15:30:43.000', '2024-03-27 15:30:43.000', "finished"),
            ('qwerty', 'lkjhg', ?, '2024-03-25 15:30:36.000', '2024-03-27 15:30:43.000', '2024-03-27 15:30:43.000', "finished"),
            ('zxcvbn', 'mnbvvc', ?, '2024-03-25 15:30:36.000', '2024-03-27 15:30:43.000', '2024-03-27 15:30:43.000', "finished");
    `,
        []byte("ERROR"),
        []byte("OK"),
        []byte("OK"),
        INSERT INTO
            cloud_migration_snapshot (session_uid, uid, created, updated, finished, status)
        VALUES
            ('qwerty', 'poiuy', '2024-03-25 15:30:36.000', '2024-03-27 15:30:43.000', '2024-03-27 15:30:43.000', "finished"),
            ('qwerty', 'lkjhg', '2024-03-25 15:30:36.000', '2024-03-27 15:30:43.000', '2024-03-27 15:30:43.000', "finished"),
            ('zxcvbn', 'mnbvvc', '2024-03-25 15:30:36.000', '2024-03-27 15:30:43.000', '2024-03-27 15:30:43.000', "finished");
    `,
    )
    require.NoError(t, err)

    _, err = testDB.GetSqlxSession().Exec(ctx, `
        INSERT INTO
            cloud_migration_resource (uid, snapshot_uid, resource_type, resource_uid, status, error_string)
        VALUES
            ('mnbvde', 'poiuy', 'DATASOURCE', 'jf38gh', 'OK', ''),
            ('qwerty', 'poiuy', 'DASHBOARD', 'ejcx4d', 'ERROR', 'fake error'),
            ('zxcvbn', 'poiuy', 'FOLDER', 'fi39fj', 'PENDING', ''),
            ('4fi9sd', '39fi39', 'FOLDER', 'fi39fj', 'OK', '');
    `,
    )
    require.NoError(t, err)
@@ -137,10 +137,10 @@ func convertRequestToDTO(request cloudmigration.MigrateDataRequest) MigrateDataR
}

func convertResponseFromDTO(result MigrateDataResponseDTO) cloudmigration.MigrateDataResponse {
    items := make([]cloudmigration.MigrateDataResponseItem, len(result.Items))
    items := make([]cloudmigration.CloudMigrationResource, len(result.Items))
    for i := 0; i < len(result.Items); i++ {
        item := result.Items[i]
        items[i] = cloudmigration.MigrateDataResponseItem{
        items[i] = cloudmigration.CloudMigrationResource{
            Type:   cloudmigration.MigrateDataType(item.Type),
            RefID:  item.RefID,
            Status: cloudmigration.ItemStatus(item.Status),
@@ -2,7 +2,6 @@ package gmsclient

import (
    "context"
    "encoding/json"
    "math/rand"
    "time"

@@ -29,11 +28,11 @@ func (c *memoryClientImpl) MigrateData(
    request cloudmigration.MigrateDataRequest,
) (*cloudmigration.MigrateDataResponse, error) {
    result := cloudmigration.MigrateDataResponse{
        Items: make([]cloudmigration.MigrateDataResponseItem, len(request.Items)),
        Items: make([]cloudmigration.CloudMigrationResource, len(request.Items)),
    }

    for i, v := range request.Items {
        result.Items[i] = cloudmigration.MigrateDataResponseItem{
        result.Items[i] = cloudmigration.CloudMigrationResource{
            Type:   v.Type,
            RefID:  v.RefID,
            Status: cloudmigration.ItemStatusOK,

@@ -60,38 +59,32 @@ func (c *memoryClientImpl) InitializeSnapshot(context.Context, cloudmigration.Cl
}

func (c *memoryClientImpl) GetSnapshotStatus(ctx context.Context, session cloudmigration.CloudMigrationSession, snapshot cloudmigration.CloudMigrationSnapshot) (*cloudmigration.CloudMigrationSnapshot, error) {
    // just fake an entire response
    gmsSnapshot := cloudmigration.CloudMigrationSnapshot{
        Status:         cloudmigration.SnapshotStatusFinished,
        GMSSnapshotUID: util.GenerateShortUID(),
        Result:         []byte{},
        Finished:       time.Now(),
    }

    result := []cloudmigration.MigrateDataResponseItem{
    results := []cloudmigration.CloudMigrationResource{
        {
            Type:   cloudmigration.DashboardDataType,
            RefID:  util.GenerateShortUID(),
            RefID:  "dash1",
            Status: cloudmigration.ItemStatusOK,
        },
        {
            Type:   cloudmigration.DatasourceDataType,
            RefID:  util.GenerateShortUID(),
            RefID:  "ds1",
            Status: cloudmigration.ItemStatusError,
            Error:  "fake error",
        },
        {
            Type:   cloudmigration.FolderDataType,
            RefID:  util.GenerateShortUID(),
            RefID:  "folder1",
            Status: cloudmigration.ItemStatusOK,
        },
    }

    b, err := json.Marshal(result)
    if err != nil {
        return nil, err
    // just fake an entire response
    gmsSnapshot := cloudmigration.CloudMigrationSnapshot{
        Status:         cloudmigration.SnapshotStatusFinished,
        GMSSnapshotUID: "gmssnapshotuid",
        Resources:      results,
        Finished:       time.Now(),
    }
    gmsSnapshot.Result = b

    return &gmsSnapshot, nil
}
@@ -1,8 +1,6 @@
package cloudmigration

import (
    "encoding/json"
    "errors"
    "time"

    "github.com/grafana/grafana/pkg/apimachinery/errutil"

@@ -45,8 +43,8 @@ type CloudMigrationSnapshot struct {
    Updated  time.Time
    Finished time.Time

    // []MigrateDataResponseItem
    Result []byte `xorm:"result"` //store raw gms response body
    // Stored in the cloud_migration_resource table
    Resources []CloudMigrationResource `xorm:"-"`
}

type SnapshotStatus string

@@ -63,14 +61,24 @@ const (
    SnapshotStatusUnknown = "unknown"
)

type CloudMigrationResource struct {
    ID  int64  `xorm:"pk autoincr 'id'"`
    UID string `xorm:"uid"`

    Type   MigrateDataType `xorm:"resource_type"`
    RefID  string          `xorm:"resource_uid"`
    Status ItemStatus      `xorm:"status"`
    Error  string          `xorm:"error_string"`

    SnapshotUID string `xorm:"snapshot_uid"`
}

// Deprecated, use GetSnapshotResult for the async workflow
func (s CloudMigrationSnapshot) GetResult() (*MigrateDataResponse, error) {
    var result MigrateDataResponse
    err := json.Unmarshal(s.Result, &result)
    if err != nil {
        return nil, errors.New("could not parse result of run")
    result := MigrateDataResponse{
        RunUID: s.UID,
        Items:  s.Resources,
    }
    result.RunUID = s.UID
    return &result, nil
}

@@ -78,17 +86,6 @@ func (s CloudMigrationSnapshot) ShouldQueryGMS() bool {
    return s.Status == SnapshotStatusPendingProcessing || s.Status == SnapshotStatusProcessing
}

func (s CloudMigrationSnapshot) GetSnapshotResult() ([]MigrateDataResponseItem, error) {
    var result []MigrateDataResponseItem
    if len(s.Result) > 0 {
        err := json.Unmarshal(s.Result, &result)
        if err != nil {
            return nil, errors.New("could not parse result of run")
        }
    }
    return result, nil
}

type CloudMigrationRunList struct {
    Runs []MigrateDataResponseList
}

@@ -108,16 +105,23 @@ type CloudMigrationSessionListResponse struct {
    Sessions []CloudMigrationSessionResponse
}

type GetSnapshotsQuery struct {
    SnapshotUID string
    SessionUID  string
    ResultPage  int
    ResultLimit int
}

type ListSnapshotsQuery struct {
    SessionUID string
    Offset     int
    Page       int
    Limit      int
}

type UpdateSnapshotCmd struct {
    UID    string
    Status SnapshotStatus
    Result []byte //store raw gms response body
    UID       string
    Status    SnapshotStatus
    Resources []CloudMigrationResource
}

// access token

@@ -172,26 +176,21 @@ type MigrateDataRequestItem struct {
type ItemStatus string

const (
    ItemStatusOK ItemStatus = "OK"
    ItemStatusError ItemStatus = "ERROR"
    ItemStatusOK ItemStatus = "OK"
    ItemStatusError ItemStatus = "ERROR"
    ItemStatusPending ItemStatus = "PENDING"
    ItemStatusUnknown ItemStatus = "UNKNOWN"
)

type MigrateDataResponse struct {
    RunUID string
    Items  []MigrateDataResponseItem
    Items  []CloudMigrationResource
}

type MigrateDataResponseList struct {
    RunUID string
}

type MigrateDataResponseItem struct {
    Type   MigrateDataType
    RefID  string
    Status ItemStatus
    Error  string
}

type CreateSessionResponse struct {
    SnapshotUid string
}
@@ -139,4 +139,23 @@ func addCloudMigrationsMigrations(mg *Migrator) {

    errorStringColumn := Column{Name: "error_string", Type: DB_Text, Nullable: true}
    mg.AddMigration("add snapshot error_string column", NewAddColumnMigration(migrationSnapshotTable, &errorStringColumn))

    // --- create table for tracking resource migrations
    migrationResourceTable := Table{
        Name: "cloud_migration_resource",
        Columns: []*Column{
            {Name: "id", Type: DB_BigInt, IsPrimaryKey: true, IsAutoIncrement: true},
            {Name: "uid", Type: DB_NVarchar, Length: 40, Nullable: false},
            {Name: "resource_type", Type: DB_NVarchar, Length: 40, Nullable: false},
            {Name: "resource_uid", Type: DB_NVarchar, Length: 40, Nullable: false},
            {Name: "status", Type: DB_NVarchar, Length: 20, Nullable: false},
            {Name: "error_string", Type: DB_Text, Nullable: true},
            {Name: "snapshot_uid", Type: DB_NVarchar, Length: 40, Nullable: false},
        },
    }

    mg.AddMigration("create cloud_migration_resource table v1", NewAddTableMigration(migrationResourceTable))

    // -- delete the snapshot result column while still in the experimental phase
    mg.AddMigration("delete cloud_migration_snapshot.result column", NewRawSQLMigration("ALTER TABLE cloud_migration_snapshot DROP COLUMN result"))
}
@@ -5382,7 +5382,9 @@
        "type": "string",
        "enum": [
          "OK",
          "ERROR"
          "ERROR",
          "PENDING",
          "UNKNOWN"
        ]
      },
      "type": {

@@ -2564,6 +2564,22 @@
        "summary": "Get metadata about a snapshot, including where it is in its processing and final results.",
        "operationId": "getSnapshot",
        "parameters": [
          {
            "type": "integer",
            "format": "int64",
            "default": 1,
            "description": "ResultPage is used for pagination with ResultLimit",
            "name": "resultPage",
            "in": "query"
          },
          {
            "type": "integer",
            "format": "int64",
            "default": 100,
            "description": "Max limit for snapshot results returned.",
            "name": "resultLimit",
            "in": "query"
          },
          {
            "type": "string",
            "description": "Session UID of a session",

@@ -2694,9 +2710,9 @@
          {
            "type": "integer",
            "format": "int64",
            "default": 0,
            "description": "Offset is used for pagination with limit",
            "name": "offset",
            "default": 1,
            "description": "Page is used for pagination with limit",
            "name": "page",
            "in": "query"
          },
          {

@@ -16874,7 +16890,9 @@
        "type": "string",
        "enum": [
          "OK",
          "ERROR"
          "ERROR",
          "PENDING",
          "UNKNOWN"
        ]
      },
      "type": {

@@ -7009,7 +7009,9 @@
        "status": {
          "enum": [
            "OK",
            "ERROR"
            "ERROR",
            "PENDING",
            "UNKNOWN"
          ],
          "type": "string"
        },

@@ -15527,6 +15529,26 @@
      "get": {
        "operationId": "getSnapshot",
        "parameters": [
          {
            "description": "ResultPage is used for pagination with ResultLimit",
            "in": "query",
            "name": "resultPage",
            "schema": {
              "default": 1,
              "format": "int64",
              "type": "integer"
            }
          },
          {
            "description": "Max limit for snapshot results returned.",
            "in": "query",
            "name": "resultLimit",
            "schema": {
              "default": 100,
              "format": "int64",
              "type": "integer"
            }
          },
          {
            "description": "Session UID of a session",
            "in": "path",

@@ -15667,11 +15689,11 @@
        "operationId": "getShapshotList",
        "parameters": [
          {
            "description": "Offset is used for pagination with limit",
            "description": "Page is used for pagination with limit",
            "in": "query",
            "name": "offset",
            "name": "page",
            "schema": {
              "default": 0,
              "default": 1,
              "format": "int64",
              "type": "integer"
            }