Mirror of https://github.com/grafana/grafana.git, synced 2025-02-25 18:55:37 -06:00
Chore: capitalise log messages for app platform (#74336)
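The pattern applied in every hunk below is the same: the log message (the first argument to the logger) now begins with a capital letter, while the structured context keys ("err", "path", "error", ...) stay lowercase. A minimal before/after sketch, assuming Grafana's `pkg/infra/log` package; the logger name "example" is hypothetical:

```go
package main

import "github.com/grafana/grafana/pkg/infra/log"

// "example" is a hypothetical logger name, used here only for illustration.
var logger = log.New("example")

func main() {
	var err error // stand-in for a real failure

	// Before: the message begins with a lowercase letter.
	logger.Error("force folder delete: failed to retrieve next object", "err", err)

	// After: the message is capitalised; the structured key "err" stays lowercase.
	logger.Error("Force folder delete: failed to retrieve next object", "err", err)
}
```

Note that the format strings passed to `fmt.Errorf` (e.g. "force folder delete error, ...") are deliberately left lowercase in the diff, matching Go's convention that error strings are not capitalised.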
@@ -247,7 +247,7 @@ func (c cdkBlobStorage) DeleteFolder(ctx context.Context, folderPath string, opt
 		}
 
 		if err != nil {
-			c.log.Error("force folder delete: failed to retrieve next object", "err", err)
+			c.log.Error("Force folder delete: failed to retrieve next object", "err", err)
 			return err
 		}
@@ -266,7 +266,7 @@ func (c cdkBlobStorage) DeleteFolder(ctx context.Context, folderPath string, opt
 
 	for _, path := range pathsToDelete {
 		if !options.AccessFilter.IsAllowed(path) {
-			c.log.Error("force folder delete: unauthorized access", "path", path)
+			c.log.Error("Force folder delete: unauthorized access", "path", path)
 			return fmt.Errorf("force folder delete error, unauthorized access to %s", path)
 		}
 	}
@@ -274,7 +274,7 @@ func (c cdkBlobStorage) DeleteFolder(ctx context.Context, folderPath string, opt
 	var lastErr error
 	for _, path := range pathsToDelete {
 		if err := c.bucket.Delete(ctx, path); err != nil {
-			c.log.Error("force folder delete: failed while deleting a file", "err", err, "path", path)
+			c.log.Error("Force folder delete: failed while deleting a file", "err", err, "path", path)
 			lastErr = err
 			// keep going and delete remaining files
 		}
@@ -234,7 +234,7 @@ func (s dbFileStorage) Upsert(ctx context.Context, cmd *UpsertFileCommand) error
 		if len(cmd.Properties) != 0 {
 			if err = upsertProperties(s.db.GetDialect(), sess, now, cmd, pathHash); err != nil {
 				if rollbackErr := sess.Rollback(); rollbackErr != nil {
-					s.log.Error("failed while rolling back upsert", "path", cmd.Path)
+					s.log.Error("Failed while rolling back upsert", "path", cmd.Path)
 				}
 				return err
 			}
@@ -543,7 +543,7 @@ func (s dbFileStorage) DeleteFolder(ctx context.Context, folderPath string, opti
 		}
 
 		if int64(len(rawHashes)) != accessibleFilesCount {
-			s.log.Error("force folder delete: unauthorized access", "path", lowerFolderPath, "expectedAccessibleFilesCount", int64(len(rawHashes)), "actualAccessibleFilesCount", accessibleFilesCount)
+			s.log.Error("Force folder delete: unauthorized access", "path", lowerFolderPath, "expectedAccessibleFilesCount", int64(len(rawHashes)), "actualAccessibleFilesCount", accessibleFilesCount)
			return fmt.Errorf("force folder delete: unauthorized access for path %s", lowerFolderPath)
 		}
 
@@ -210,7 +210,7 @@ func (h *DashboardHandler) DashboardDeleted(orgID int64, user *user.UserDisplayD
 func (h *DashboardHandler) HasGitOpsObserver(orgID int64) bool {
 	count, err := h.ClientCount(orgID, GitopsChannel)
 	if err != nil {
-		logger.Error("error getting client count", "error", err)
+		logger.Error("Error getting client count", "error", err)
 		return false
 	}
 	return count > 0
@@ -793,7 +793,7 @@ func subscribeStatusToHTTPError(status backend.SubscribeStreamStatus) (int, stri
 	case backend.SubscribeStreamStatusPermissionDenied:
 		return http.StatusForbidden, http.StatusText(http.StatusForbidden)
 	default:
-		logger.Warn("unknown subscribe status", "status", status)
+		logger.Warn("Unknown subscribe status", "status", status)
 		return http.StatusInternalServerError, http.StatusText(http.StatusInternalServerError)
 	}
 }
@@ -805,7 +805,7 @@ func publishStatusToHTTPError(status backend.PublishStreamStatus) (int, string)
 	case backend.PublishStreamStatusPermissionDenied:
 		return http.StatusForbidden, http.StatusText(http.StatusForbidden)
 	default:
-		logger.Warn("unknown publish status", "status", status)
+		logger.Warn("Unknown publish status", "status", status)
 		return http.StatusInternalServerError, http.StatusText(http.StatusInternalServerError)
 	}
 }
@@ -36,7 +36,7 @@ func (s *CacheSegmentedTree) updatePeriodically() {
 		for _, orgID := range orgIDs {
 			err := s.fillOrg(orgID)
 			if err != nil {
-				logger.Error("error filling orgId", "error", err, "orgId", orgID)
+				logger.Error("Error filling orgId", "error", err, "orgId", orgID)
 			}
 		}
 		time.Sleep(20 * time.Second)
@@ -237,7 +237,7 @@ func (s *metricFrame) append(m influx.Metric) error {
 	if index, ok := s.fieldCache[f.Key]; ok {
 		field := s.fields[index]
 		if ft != field.Type() {
-			logger.Warn("error appending values", "type", field.Type(), "expect", ft, "value", v, "key", f.Key, "line", m)
+			logger.Warn("Error appending values", "type", field.Type(), "expect", ft, "value", v, "key", f.Key, "line", m)
 			if field.Type() == data.FieldTypeNullableString && v != nil {
 				str := fmt.Sprintf("%v", f.Value)
 				v = &str
@@ -34,14 +34,14 @@ func (a *simpleAuthService) GetDashboardReadFilter(ctx context.Context, orgID in
 		if kind == entityKindFolder {
 			scopes, err := dashboards.GetInheritedScopes(ctx, orgID, uid, a.folderService)
 			if err != nil {
-				a.logger.Debug("could not retrieve inherited folder scopes:", "err", err)
+				a.logger.Debug("Could not retrieve inherited folder scopes:", "err", err)
 			}
 			scopes = append(scopes, dashboards.ScopeFoldersProvider.GetResourceScopeUID(uid))
 			return canReadFolder(scopes...)
 		} else if kind == entityKindDashboard {
 			scopes, err := dashboards.GetInheritedScopes(ctx, orgID, parent, a.folderService)
 			if err != nil {
-				a.logger.Debug("could not retrieve inherited folder scopes:", "err", err)
+				a.logger.Debug("Could not retrieve inherited folder scopes:", "err", err)
 			}
 			scopes = append(scopes, dashboards.ScopeDashboardsProvider.GetResourceScopeUID(uid))
 			scopes = append(scopes, dashboards.ScopeFoldersProvider.GetResourceScopeUID(parent))
@@ -380,7 +380,7 @@ func doSearchQuery(
 
 	reader, cancel, err := index.readerForIndex(indexTypeDashboard)
 	if err != nil {
-		logger.Error("error getting reader for dashboard index: %v", err)
+		logger.Error("Error getting reader for dashboard index: %v", err)
 		response.Error = err
 		return response
 	}
@@ -493,7 +493,7 @@ func doSearchQuery(
 	// execute this search on the reader
 	documentMatchIterator, err := reader.Search(ctx, req)
 	if err != nil {
-		logger.Error("error executing search", "err", err)
+		logger.Error("Error executing search", "err", err)
 		response.Error = err
 		return response
 	}
@@ -575,7 +575,7 @@ func doSearchQuery(
 		return true
 	})
 	if err != nil {
-		logger.Error("error loading stored fields", "err", err)
+		logger.Error("Error loading stored fields", "err", err)
 		response.Error = err
 		return response
 	}
@@ -60,9 +60,9 @@ func (q *PermissionFilter) logAccessDecision(decision bool, kind any, id string,
 
 	ctx = append(ctx, "kind", kind, "id", id, "reason", reason)
 	if decision {
-		q.log.Debug("allowing access", ctx...)
+		q.log.Debug("Allowing access", ctx...)
 	} else {
-		q.log.Info("denying access", ctx...)
+		q.log.Info("Denying access", ctx...)
 	}
 }
 
@@ -438,13 +438,13 @@ func (i *searchIndex) reportSizeOfIndexDiskBackup(orgID int64) {
 	// create a temp directory to store the index
 	tmpDir, err := os.MkdirTemp("", "grafana.dashboard_index")
 	if err != nil {
-		i.logger.Error("can't create temp dir", "error", err)
+		i.logger.Error("Can't create temp dir", "error", err)
 		return
 	}
 	defer func() {
 		err := os.RemoveAll(tmpDir)
 		if err != nil {
-			i.logger.Error("can't remove temp dir", "error", err, "tmpDir", tmpDir)
+			i.logger.Error("Can't remove temp dir", "error", err, "tmpDir", tmpDir)
 			return
 		}
 	}()
@@ -452,13 +452,13 @@ func (i *searchIndex) reportSizeOfIndexDiskBackup(orgID int64) {
 	cancelCh := make(chan struct{})
 	err = reader.Backup(tmpDir, cancelCh)
 	if err != nil {
-		i.logger.Error("can't create index disk backup", "error", err)
+		i.logger.Error("Can't create index disk backup", "error", err)
 		return
 	}
 
 	size, err := dirSize(tmpDir)
 	if err != nil {
-		i.logger.Error("can't calculate dir size", "error", err)
+		i.logger.Error("Can't calculate dir size", "error", err)
 		return
 	}
 
@@ -598,7 +598,7 @@ func (i *searchIndex) applyIndexUpdates(ctx context.Context, lastEventID int64)
 	ctx = log.InitCounter(ctx)
 	events, err := i.eventStore.GetAllEventsAfter(ctx, lastEventID)
 	if err != nil {
-		i.logger.Error("can't load events", "error", err)
+		i.logger.Error("Can't load events", "error", err)
 		return lastEventID
 	}
 	if len(events) == 0 {
@@ -608,7 +608,7 @@ func (i *searchIndex) applyIndexUpdates(ctx context.Context, lastEventID int64)
 	for _, e := range events {
 		err := i.applyEventOnIndex(ctx, e)
 		if err != nil {
-			i.logger.Error("can't apply event", "error", err)
+			i.logger.Error("Can't apply event", "error", err)
 			return lastEventID
 		}
 		lastEventID = e.Id
@@ -618,22 +618,22 @@ func (i *searchIndex) applyIndexUpdates(ctx context.Context, lastEventID int64)
 }
 
 func (i *searchIndex) applyEventOnIndex(ctx context.Context, e *store.EntityEvent) error {
-	i.logger.Debug("processing event", "event", e)
+	i.logger.Debug("Processing event", "event", e)
 
 	if !strings.HasPrefix(e.EntityId, "database/") {
-		i.logger.Warn("unknown storage", "entityId", e.EntityId)
+		i.logger.Warn("Unknown storage", "entityId", e.EntityId)
 		return nil
 	}
 	// database/org/entityType/path*
 	parts := strings.SplitN(strings.TrimPrefix(e.EntityId, "database/"), "/", 3)
 	if len(parts) != 3 {
-		i.logger.Error("can't parse entityId", "entityId", e.EntityId)
+		i.logger.Error("Can't parse entityId", "entityId", e.EntityId)
 		return nil
 	}
 	orgIDStr := parts[0]
 	orgID, err := strconv.ParseInt(orgIDStr, 10, 64)
 	if err != nil {
-		i.logger.Error("can't extract org ID", "entityId", e.EntityId)
+		i.logger.Error("Can't extract org ID", "entityId", e.EntityId)
 		return nil
 	}
 	kind := store.EntityType(parts[1])
@@ -198,7 +198,7 @@ func (s *StandardSearchService) getUser(ctx context.Context, backendUser *backen
 	permissions, err := s.ac.GetUserPermissions(ctx, usr,
 		accesscontrol.Options{ReloadCache: false})
 	if err != nil {
-		s.logger.Error("failed to retrieve user permissions", "error", err, "email", backendUser.Email)
+		s.logger.Error("Failed to retrieve user permissions", "error", err, "email", backendUser.Email)
 		return nil, errors.New("auth error")
 	}
 
@@ -268,7 +268,7 @@ func (s *StandardSearchService) doDashboardQuery(ctx context.Context, signedInUs
 
 	if q.WithAllowedActions {
 		if err := s.addAllowedActionsField(ctx, orgID, signedInUser, response); err != nil {
-			s.logger.Error("error when adding the allowedActions field", "err", err)
+			s.logger.Error("Error when adding the allowedActions field", "err", err)
 		}
 	}
 
@@ -55,7 +55,7 @@ func updateUsageStats(ctx context.Context, reader *bluge.Reader, logger log.Logg
 	// execute this search on the reader
 	documentMatchIterator, err := reader.Search(ctx, req)
 	if err != nil {
-		logger.Error("error executing search", "err", err)
+		logger.Error("Error executing search", "err", err)
 		return
 	}
 
@@ -116,7 +116,7 @@ func (e *entityEventService) deleteEventsOlderThan(ctx context.Context, duration
 	return e.sql.WithDbSession(ctx, func(sess *db.Session) error {
 		maxCreated := time.Now().Add(-duration)
 		deletedCount, err := sess.Where("created < ?", maxCreated.Unix()).Delete(&EntityEvent{})
-		e.log.Info("deleting old events", "count", deletedCount, "maxCreated", maxCreated)
+		e.log.Info("Deleting old events", "count", deletedCount, "maxCreated", maxCreated)
 		return err
 	})
 }
@@ -134,7 +134,7 @@ func (e *entityEventService) Run(ctx context.Context) error {
 			go func() {
 				err := e.deleteEventsOlderThan(context.Background(), 24*time.Hour)
 				if err != nil {
-					e.log.Info("failed to delete old entity events", "error", err)
+					e.log.Info("Failed to delete old entity events", "error", err)
 				}
 			}()
 		case <-ctx.Done():
@@ -92,7 +92,7 @@ func (a *pathFilterFileGuardian) can(action string, path string) bool {
 
 	allow = pathFilter.IsAllowed(path)
 	if !allow {
-		a.log.Warn("denying", "action", action, "path", path)
+		a.log.Warn("Denying", "action", action, "path", path)
 	}
 	return allow
 }
@@ -21,10 +21,10 @@ func (s *standardStorageService) sanitizeContents(ctx context.Context, user *use
 	})
 	if err != nil {
 		if s.cfg != nil && s.cfg.AllowUnsanitizedSvgUpload {
-			grafanaStorageLogger.Debug("allowing unsanitized svg upload", "filename", req.Path, "sanitizationError", err)
+			grafanaStorageLogger.Debug("Allowing unsanitized svg upload", "filename", req.Path, "sanitizationError", err)
 			return req.Contents, nil
 		} else {
-			grafanaStorageLogger.Debug("disallowing unsanitized svg upload", "filename", req.Path, "sanitizationError", err)
+			grafanaStorageLogger.Debug("Disallowing unsanitized svg upload", "filename", req.Path, "sanitizationError", err)
 			return nil, err
 		}
 	}
@@ -45,7 +45,7 @@ func (s *standardStorageService) sanitizeUploadRequest(ctx context.Context, user
 	// we have already validated that the file contents match the extension in `./validate.go`
 	mimeType := mime.TypeByExtension(filepath.Ext(req.Path))
 	if mimeType == "" {
-		grafanaStorageLogger.Info("failed to find mime type", "path", req.Path)
+		grafanaStorageLogger.Info("Failed to find mime type", "path", req.Path)
 		mimeType = "application/octet-stream"
 	}
 
@@ -101,7 +101,7 @@ func ProvideService(
 ) (StorageService, error) {
 	settings, err := LoadStorageConfig(cfg, features)
 	if err != nil {
-		grafanaStorageLogger.Warn("error loading storage config", "error", err)
+		grafanaStorageLogger.Warn("Error loading storage config", "error", err)
 	}
 
 	if err := migrations.MigrateEntityStore(sql, features); err != nil {
@@ -160,7 +160,7 @@ func ProvideService(
 			root.UnderContentRoot = true
 			s, err := newStorage(root, filepath.Join(cfg.DataPath, "storage", "cache", root.Prefix))
 			if err != nil {
-				grafanaStorageLogger.Warn("error loading storage config", "error", err)
+				grafanaStorageLogger.Warn("Error loading storage config", "error", err)
 			}
 			if s != nil {
 				globalRoots = append(globalRoots, s)
@@ -209,7 +209,7 @@ func ProvideService(
 		if storageName == RootSystem {
 			filter, err := systemUsersService.GetFilter(user)
 			if err != nil {
-				grafanaStorageLogger.Error("failed to create path filter for system user", "userID", user.UserID, "userLogin", user.Login, "err", err)
+				grafanaStorageLogger.Error("Failed to create path filter for system user", "userID", user.UserID, "userLogin", user.Login, "err", err)
 				return map[string]filestorage.PathFilter{
 					ActionFilesRead:  denyAllPathFilter,
 					ActionFilesWrite: denyAllPathFilter,
@@ -328,7 +328,7 @@ func newStandardStorageService(
 }
 
 func (s *standardStorageService) Run(ctx context.Context) error {
-	grafanaStorageLogger.Info("storage starting")
+	grafanaStorageLogger.Info("Storage starting")
 	return nil
 }
 
@@ -411,22 +411,22 @@ func (s *standardStorageService) Upload(ctx context.Context, user *user.SignedIn
 
 	validationResult := s.validateUploadRequest(ctx, user, req, storagePath)
 	if !validationResult.ok {
-		grafanaStorageLogger.Warn("file upload validation failed", "path", req.Path, "reason", validationResult.reason)
+		grafanaStorageLogger.Warn("File upload validation failed", "path", req.Path, "reason", validationResult.reason)
 		return ErrValidationFailed
 	}
 
 	upsertCommand, err := s.sanitizeUploadRequest(ctx, user, req, storagePath)
 	if err != nil {
-		grafanaStorageLogger.Error("failed while sanitizing the upload request", "path", req.Path, "error", err)
+		grafanaStorageLogger.Error("Failed while sanitizing the upload request", "path", req.Path, "error", err)
 		return ErrUploadInternalError
 	}
 
-	grafanaStorageLogger.Info("uploading a file", "path", req.Path)
+	grafanaStorageLogger.Info("Uploading a file", "path", req.Path)
 
 	if !req.OverwriteExistingFile {
 		file, _, err := root.Store().Get(ctx, storagePath, &filestorage.GetFileOptions{WithContents: false})
 		if err != nil {
-			grafanaStorageLogger.Error("failed while checking file existence", "err", err, "path", req.Path)
+			grafanaStorageLogger.Error("Failed while checking file existence", "err", err, "path", req.Path)
 			return ErrUploadInternalError
 		}
@@ -436,7 +436,7 @@ func (s *standardStorageService) Upload(ctx context.Context, user *user.SignedIn
 	}
 
 	if err := root.Store().Upsert(ctx, upsertCommand); err != nil {
-		grafanaStorageLogger.Error("failed while uploading the file", "err", err, "path", req.Path)
+		grafanaStorageLogger.Error("Failed while uploading the file", "err", err, "path", req.Path)
 		return ErrUploadInternalError
 	}
 
@@ -447,12 +447,12 @@ func (s *standardStorageService) checkFileQuota(ctx context.Context, path string
 	// assumes we are only uploading to the SQL database - TODO: refactor once we introduce object stores
 	quotaReached, err := s.quotaService.CheckQuotaReached(ctx, QuotaTargetSrv, nil)
 	if err != nil {
-		grafanaStorageLogger.Error("failed while checking upload quota", "path", path, "error", err)
+		grafanaStorageLogger.Error("Failed while checking upload quota", "path", path, "error", err)
 		return ErrUploadInternalError
 	}
 
 	if quotaReached {
-		grafanaStorageLogger.Info("reached file quota", "path", path)
+		grafanaStorageLogger.Info("Reached file quota", "path", path)
 		return ErrQuotaReached
 	}
 
@@ -50,7 +50,7 @@ func newDiskStorage(meta RootStorageMeta, scfg RootStorageConfig) *rootStorageDi
 	path := protocol + cfg.Path
 	bucket, err := blob.OpenBucket(context.Background(), path)
 	if err != nil {
-		grafanaStorageLogger.Warn("error loading storage", "prefix", scfg.Prefix, "err", err)
+		grafanaStorageLogger.Warn("Error loading storage", "prefix", scfg.Prefix, "err", err)
 		meta.Notice = append(meta.Notice, data.Notice{
 			Severity: data.NoticeSeverityError,
 			Text:     "Failed to initialize storage",
@@ -117,7 +117,7 @@ func newGitStorage(meta RootStorageMeta, scfg RootStorageConfig, localWorkCache
 	path := fmt.Sprintf("file://%s", p)
 	bucket, err := blob.OpenBucket(context.Background(), path)
 	if err != nil {
-		grafanaStorageLogger.Warn("error loading storage", "prefix", scfg.Prefix, "err", err)
+		grafanaStorageLogger.Warn("Error loading storage", "prefix", scfg.Prefix, "err", err)
 		meta.Notice = append(meta.Notice, data.Notice{
 			Severity: data.NoticeSeverityError,
 			Text:     "Failed to initialize storage",
@@ -158,7 +158,7 @@ func newGitStorage(meta RootStorageMeta, scfg RootStorageConfig, localWorkCache
 			})
 			s.github = nil
 		} else {
-			grafanaStorageLogger.Info("default branch", "branch", *ghrepo.DefaultBranch)
+			grafanaStorageLogger.Info("Default branch", "branch", *ghrepo.DefaultBranch)
 		}
 	}
 }
@@ -185,10 +185,10 @@ func newGitStorage(meta RootStorageMeta, scfg RootStorageConfig, localWorkCache
 	ticker := time.NewTicker(t)
 	go func() {
 		for range ticker.C {
-			grafanaStorageLogger.Info("try git pull", "branch", s.settings.Remote)
+			grafanaStorageLogger.Info("Try git pull", "branch", s.settings.Remote)
 			err = s.Sync()
 			if err != nil {
-				grafanaStorageLogger.Info("error pulling", "error", err)
+				grafanaStorageLogger.Info("Error pulling", "error", err)
 			}
 		}
 	}()
@@ -355,7 +355,7 @@ func (s *rootStorageGit) Write(ctx context.Context, cmd *WriteValueRequest) (*Wr
 		return nil, err
 	}
 
-	grafanaStorageLogger.Info("made commit", "hash", hash)
+	grafanaStorageLogger.Info("Made commit", "hash", hash)
 	// err = s.repo.Push(&git.PushOptions{
 	// 	InsecureSkipTLS: true,
 	// })