diff --git a/server/Makefile b/server/Makefile index 70f28f9bea..144e427f80 100644 --- a/server/Makefile +++ b/server/Makefile @@ -377,7 +377,7 @@ plugin-mocks: ## Creates mock files for plugins. einterfaces-mocks: ## Creates mock files for einterfaces. $(GO) install github.com/vektra/mockery/v2/...@v2.23.2 - $(GOBIN)/mockery --dir channels/einterfaces --all --output channels/einterfaces/mocks --note 'Regenerate this file using `make einterfaces-mocks`.' + $(GOBIN)/mockery --dir einterfaces --all --output einterfaces/mocks --note 'Regenerate this file using `make einterfaces-mocks`.' searchengine-mocks: ## Creates mock files for searchengines. $(GO) install github.com/vektra/mockery/v2/...@v2.23.2 diff --git a/server/channels/api4/job.go b/server/channels/api4/job.go index 1555fc4480..d258f2479c 100644 --- a/server/channels/api4/job.go +++ b/server/channels/api4/job.go @@ -31,7 +31,7 @@ func getJob(c *Context, w http.ResponseWriter, r *http.Request) { return } - job, err := c.App.GetJob(c.Params.JobId) + job, err := c.App.GetJob(c.AppContext, c.Params.JobId) if err != nil { c.Err = err return @@ -67,7 +67,7 @@ func downloadJob(c *Context, w http.ResponseWriter, r *http.Request) { return } - job, err := c.App.GetJob(c.Params.JobId) + job, err := c.App.GetJob(c.AppContext, c.Params.JobId) if err != nil { c.Err = err return @@ -126,7 +126,7 @@ func createJob(c *Context, w http.ResponseWriter, r *http.Request) { return } - rjob, err := c.App.CreateJob(&job) + rjob, err := c.App.CreateJob(c.AppContext, &job) if err != nil { c.Err = err return @@ -163,7 +163,7 @@ func getJobs(c *Context, w http.ResponseWriter, r *http.Request) { return } - jobs, appErr := c.App.GetJobsByTypesPage(validJobTypes, c.Params.Page, c.Params.PerPage) + jobs, appErr := c.App.GetJobsByTypesPage(c.AppContext, validJobTypes, c.Params.Page, c.Params.PerPage) if appErr != nil { c.Err = appErr return @@ -193,7 +193,7 @@ func getJobsByType(c *Context, w http.ResponseWriter, r *http.Request) { return } - jobs, appErr := c.App.GetJobsByTypePage(c.Params.JobType, c.Params.Page, c.Params.PerPage) + jobs, appErr := c.App.GetJobsByTypePage(c.AppContext, c.Params.JobType, c.Params.Page, c.Params.PerPage) if appErr != nil { c.Err = appErr return @@ -218,7 +218,7 @@ func cancelJob(c *Context, w http.ResponseWriter, r *http.Request) { defer c.LogAuditRec(auditRec) audit.AddEventParameter(auditRec, "job_id", c.Params.JobId) - job, err := c.App.GetJob(c.Params.JobId) + job, err := c.App.GetJob(c.AppContext, c.Params.JobId) if err != nil { c.Err = err return @@ -239,7 +239,7 @@ func cancelJob(c *Context, w http.ResponseWriter, r *http.Request) { return } - if err := c.App.CancelJob(c.Params.JobId); err != nil { + if err := c.App.CancelJob(c.AppContext, c.Params.JobId); err != nil { c.Err = err return } diff --git a/server/channels/api4/ldap.go b/server/channels/api4/ldap.go index b7dc07cbb8..7312802785 100644 --- a/server/channels/api4/ldap.go +++ b/server/channels/api4/ldap.go @@ -66,7 +66,7 @@ func syncLdap(c *Context, w http.ResponseWriter, r *http.Request) { return } - c.App.SyncLdap(opts.IncludeRemovedMembers) + c.App.SyncLdap(c.AppContext, opts.IncludeRemovedMembers) auditRec.Success() ReturnStatusOK(w) diff --git a/server/channels/api4/ldap_test.go b/server/channels/api4/ldap_test.go index bc09d51d68..46623ec686 100644 --- a/server/channels/api4/ldap_test.go +++ b/server/channels/api4/ldap_test.go @@ -145,13 +145,14 @@ func TestSyncLdap(t *testing.T) { ldapMock := &mocks.LdapInterface{} mockCall := ldapMock.On( 
"StartSynchronizeJob", + mock.AnythingOfType("*request.Context"), mock.AnythingOfType("bool"), mock.AnythingOfType("bool"), ).Return(nil, nil) ready := make(chan bool) includeRemovedMembers := false mockCall.RunFn = func(args mock.Arguments) { - includeRemovedMembers = args[1].(bool) + includeRemovedMembers = args[2].(bool) ready <- true } th.App.Channels().Ldap = ldapMock diff --git a/server/channels/api4/system.go b/server/channels/api4/system.go index 343016ead2..9b85abcfb8 100644 --- a/server/channels/api4/system.go +++ b/server/channels/api4/system.go @@ -98,7 +98,7 @@ func generateSupportPacket(c *Context, w http.ResponseWriter, r *http.Request) { return } - fileDatas := c.App.GenerateSupportPacket() + fileDatas := c.App.GenerateSupportPacket(c.AppContext) // Constructing the ZIP file name as per spec (mattermost_support_packet_YYYY-MM-DD-HH-MM.zip) now := time.Now() diff --git a/server/channels/api4/team.go b/server/channels/api4/team.go index 87b1a8b98e..24349b8806 100644 --- a/server/channels/api4/team.go +++ b/server/channels/api4/team.go @@ -1432,7 +1432,7 @@ func inviteUsersToTeam(c *Context, w http.ResponseWriter, r *http.Request) { } // we then manually schedule the job to send another invite after 48 hours - _, appErr = c.App.Srv().Jobs.CreateJob(model.JobTypeResendInvitationEmail, jobData) + _, appErr = c.App.Srv().Jobs.CreateJob(c.AppContext, model.JobTypeResendInvitationEmail, jobData) if appErr != nil { c.Err = model.NewAppError("Api4.inviteUsersToTeam", appErr.Id, nil, appErr.Error(), appErr.StatusCode) return @@ -1592,7 +1592,7 @@ func invalidateAllEmailInvites(c *Context, w http.ResponseWriter, r *http.Reques auditRec := c.MakeAuditRecord("invalidateAllEmailInvites", audit.Fail) defer c.LogAuditRec(auditRec) - if err := c.App.InvalidateAllEmailInvites(); err != nil { + if err := c.App.InvalidateAllEmailInvites(c.AppContext); err != nil { c.Err = err return } diff --git a/server/channels/api4/team_test.go b/server/channels/api4/team_test.go index 3076ae9924..02bf1aca75 100644 --- a/server/channels/api4/team_test.go +++ b/server/channels/api4/team_test.go @@ -3196,15 +3196,14 @@ func TestValidateUserPermissionsOnChannels(t *testing.T) { th := Setup(t).InitBasic() defer th.TearDown() - // define user session and context - context := request.NewContext(context.Background(), model.NewId(), model.NewId(), model.NewId(), model.NewId(), model.NewId(), model.Session{}, nil) + ctx := request.EmptyContext(th.TestLogger) t.Run("User WITH permissions on private channel CAN invite members to it", func(t *testing.T) { channelIds := []string{th.BasicChannel.Id, th.BasicPrivateChannel.Id} require.Len(t, channelIds, 2) - channelIds = th.App.ValidateUserPermissionsOnChannels(context, th.BasicUser.Id, channelIds) + channelIds = th.App.ValidateUserPermissionsOnChannels(ctx, th.BasicUser.Id, channelIds) // basicUser has permission onBasicChannel and BasicPrivateChannel so he can invite to both channels require.Len(t, channelIds, 2) @@ -3216,7 +3215,7 @@ func TestValidateUserPermissionsOnChannels(t *testing.T) { require.Len(t, channelIds, 2) - channelIds = th.App.ValidateUserPermissionsOnChannels(context, th.BasicUser.Id, channelIds) + channelIds = th.App.ValidateUserPermissionsOnChannels(ctx, th.BasicUser.Id, channelIds) // basicUser DOES NOT have permission on BasicPrivateChannel2 so he can only invite to BasicChannel require.Len(t, channelIds, 1) diff --git a/server/channels/api4/user.go b/server/channels/api4/user.go index 7f20e105a1..145070a471 100644 --- a/server/channels/api4/user.go 
+++ b/server/channels/api4/user.go @@ -3072,7 +3072,7 @@ func migrateAuthToLDAP(c *Context, w http.ResponseWriter, r *http.Request) { } if migrate := c.App.AccountMigration(); migrate != nil { - if err := migrate.MigrateToLdap(from, matchField, force, false); err != nil { + if err := migrate.MigrateToLdap(c.AppContext, from, matchField, force, false); err != nil { c.Err = model.NewAppError("api.migrateAuthToLdap", "api.migrate_to_saml.error", nil, err.Error(), http.StatusInternalServerError) return } diff --git a/server/channels/app/app_iface.go b/server/channels/app/app_iface.go index 7850275fc9..6d41d2e37a 100644 --- a/server/channels/app/app_iface.go +++ b/server/channels/app/app_iface.go @@ -333,7 +333,7 @@ type AppIface interface { // SyncLdap starts an LDAP sync job. // If includeRemovedMembers is true, then members who left or were removed from a team/channel will // be re-added; otherwise, they will not be re-added. - SyncLdap(includeRemovedMembers bool) + SyncLdap(c *request.Context, includeRemovedMembers bool) // SyncPlugins synchronizes the plugins installed locally // with the plugin bundles available in the file store. SyncPlugins() *model.AppError @@ -444,7 +444,7 @@ type AppIface interface { BulkImport(c *request.Context, jsonlReader io.Reader, attachmentsReader *zip.Reader, dryRun bool, workers int) (*model.AppError, int) BulkImportWithPath(c *request.Context, jsonlReader io.Reader, attachmentsReader *zip.Reader, dryRun bool, workers int, importPath string) (*model.AppError, int) CanNotifyAdmin(trial bool) bool - CancelJob(jobId string) *model.AppError + CancelJob(c *request.Context, jobId string) *model.AppError ChannelMembersToRemove(teamID *string) ([]*model.ChannelMember, *model.AppError) Channels() *Channels CheckCanInviteToSharedChannel(channelId string) error @@ -487,7 +487,7 @@ type AppIface interface { CreateGroupChannel(c request.CTX, userIDs []string, creatorId string) (*model.Channel, *model.AppError) CreateGroupWithUserIds(group *model.GroupWithUserIds) (*model.Group, *model.AppError) CreateIncomingWebhookForChannel(creatorId string, channel *model.Channel, hook *model.IncomingWebhook) (*model.IncomingWebhook, *model.AppError) - CreateJob(job *model.Job) (*model.Job, *model.AppError) + CreateJob(c *request.Context, job *model.Job) (*model.Job, *model.AppError) CreateOAuthApp(app *model.OAuthApp) (*model.OAuthApp, *model.AppError) CreateOAuthStateToken(extra string) (*model.Token, *model.AppError) CreateOAuthUser(c *request.Context, service string, userData io.Reader, teamID string, tokenUser *model.User) (*model.User, *model.AppError) @@ -581,7 +581,7 @@ type AppIface interface { GenerateMfaSecret(userID string) (*model.MfaSecret, *model.AppError) GeneratePresignURLForExport(name string) (*model.PresignURLResponse, *model.AppError) GeneratePublicLink(siteURL string, info *model.FileInfo) string - GenerateSupportPacket() []model.FileData + GenerateSupportPacket(c *request.Context) []model.FileData GetAcknowledgementsForPost(postID string) ([]*model.PostAcknowledgement, *model.AppError) GetAcknowledgementsForPostList(postList *model.PostList) (map[string][]*model.PostAcknowledgement, *model.AppError) GetActivePluginManifests() ([]*model.Manifest, *model.AppError) @@ -678,13 +678,11 @@ type AppIface interface { GetIncomingWebhooksForTeamPageByUser(teamID string, userID string, page, perPage int) ([]*model.IncomingWebhook, *model.AppError) GetIncomingWebhooksPage(page, perPage int) ([]*model.IncomingWebhook, *model.AppError) GetIncomingWebhooksPageByUser(userID 
string, page, perPage int) ([]*model.IncomingWebhook, *model.AppError) - GetJob(id string) (*model.Job, *model.AppError) - GetJobs(offset int, limit int) ([]*model.Job, *model.AppError) - GetJobsByType(jobType string, offset int, limit int) ([]*model.Job, *model.AppError) - GetJobsByTypePage(jobType string, page int, perPage int) ([]*model.Job, *model.AppError) - GetJobsByTypes(jobTypes []string, offset int, limit int) ([]*model.Job, *model.AppError) - GetJobsByTypesPage(jobType []string, page int, perPage int) ([]*model.Job, *model.AppError) - GetJobsPage(page int, perPage int) ([]*model.Job, *model.AppError) + GetJob(c *request.Context, id string) (*model.Job, *model.AppError) + GetJobsByType(c *request.Context, jobType string, offset int, limit int) ([]*model.Job, *model.AppError) + GetJobsByTypePage(c *request.Context, jobType string, page int, perPage int) ([]*model.Job, *model.AppError) + GetJobsByTypes(c *request.Context, jobTypes []string, offset int, limit int) ([]*model.Job, *model.AppError) + GetJobsByTypesPage(c *request.Context, jobType []string, page int, perPage int) ([]*model.Job, *model.AppError) GetLatestTermsOfService() (*model.TermsOfService, *model.AppError) GetLatestVersion(latestVersionUrl string) (*model.GithubReleaseInfo, *model.AppError) GetLogs(c request.CTX, page, perPage int) ([]string, *model.AppError) @@ -877,8 +875,8 @@ type AppIface interface { ImageProxyRemover() (f func(string) string) ImportPermissions(jsonl io.Reader) error InitPlugins(c *request.Context, pluginDir, webappPluginDir string) - InvalidateAllEmailInvites() *model.AppError - InvalidateAllResendInviteEmailJobs() *model.AppError + InvalidateAllEmailInvites(c *request.Context) *model.AppError + InvalidateAllResendInviteEmailJobs(c *request.Context) *model.AppError InvalidateCacheForUser(userID string) InvalidatePasswordRecoveryTokensForUser(userID string) *model.AppError InviteGuestsToChannels(teamID string, guestsInvite *model.GuestsInvite, senderId string) *model.AppError diff --git a/server/channels/app/import_test.go b/server/channels/app/import_test.go index 5c08503dcb..df209fb8e0 100644 --- a/server/channels/app/import_test.go +++ b/server/channels/app/import_test.go @@ -287,8 +287,7 @@ func AssertFileIdsInPost(files []*model.FileInfo, th *TestHelper, t *testing.T) } func TestProcessAttachments(t *testing.T) { - logger, _ := mlog.NewLogger() - c := request.EmptyContext(logger) + c := request.EmptyContext(mlog.CreateConsoleTestLogger(t)) genAttachments := func() *[]imports.AttachmentImportData { return &[]imports.AttachmentImportData{ diff --git a/server/channels/app/job.go b/server/channels/app/job.go index 085ce57388..48e253d2d7 100644 --- a/server/channels/app/job.go +++ b/server/channels/app/job.go @@ -8,11 +8,12 @@ import ( "net/http" "github.com/mattermost/mattermost/server/public/model" + "github.com/mattermost/mattermost/server/public/shared/request" "github.com/mattermost/mattermost/server/v8/channels/store" ) -func (a *App) GetJob(id string) (*model.Job, *model.AppError) { - job, err := a.Srv().Store().Job().Get(id) +func (a *App) GetJob(c *request.Context, id string) (*model.Job, *model.AppError) { + job, err := a.Srv().Store().Job().Get(c, id) if err != nil { var nfErr *store.ErrNotFound switch { @@ -26,25 +27,12 @@ func (a *App) GetJob(id string) (*model.Job, *model.AppError) { return job, nil } -func (a *App) GetJobsPage(page int, perPage int) ([]*model.Job, *model.AppError) { - return a.GetJobs(page*perPage, perPage) +func (a *App) GetJobsByTypePage(c *request.Context, 
jobType string, page int, perPage int) ([]*model.Job, *model.AppError) { + return a.GetJobsByType(c, jobType, page*perPage, perPage) } -func (a *App) GetJobs(offset int, limit int) ([]*model.Job, *model.AppError) { - jobs, err := a.Srv().Store().Job().GetAllPage(offset, limit) - if err != nil { - return nil, model.NewAppError("GetJobs", "app.job.get_all.app_error", nil, "", http.StatusInternalServerError).Wrap(err) - } - - return jobs, nil -} - -func (a *App) GetJobsByTypePage(jobType string, page int, perPage int) ([]*model.Job, *model.AppError) { - return a.GetJobsByType(jobType, page*perPage, perPage) -} - -func (a *App) GetJobsByType(jobType string, offset int, limit int) ([]*model.Job, *model.AppError) { - jobs, err := a.Srv().Store().Job().GetAllByTypePage(jobType, offset, limit) +func (a *App) GetJobsByType(c *request.Context, jobType string, offset int, limit int) ([]*model.Job, *model.AppError) { + jobs, err := a.Srv().Store().Job().GetAllByTypePage(c, jobType, offset, limit) if err != nil { return nil, model.NewAppError("GetJobsByType", "app.job.get_all.app_error", nil, "", http.StatusInternalServerError).Wrap(err) } @@ -52,24 +40,24 @@ func (a *App) GetJobsByType(jobType string, offset int, limit int) ([]*model.Job return jobs, nil } -func (a *App) GetJobsByTypesPage(jobType []string, page int, perPage int) ([]*model.Job, *model.AppError) { - return a.GetJobsByTypes(jobType, page*perPage, perPage) +func (a *App) GetJobsByTypesPage(c *request.Context, jobType []string, page int, perPage int) ([]*model.Job, *model.AppError) { + return a.GetJobsByTypes(c, jobType, page*perPage, perPage) } -func (a *App) GetJobsByTypes(jobTypes []string, offset int, limit int) ([]*model.Job, *model.AppError) { - jobs, err := a.Srv().Store().Job().GetAllByTypesPage(jobTypes, offset, limit) +func (a *App) GetJobsByTypes(c *request.Context, jobTypes []string, offset int, limit int) ([]*model.Job, *model.AppError) { + jobs, err := a.Srv().Store().Job().GetAllByTypesPage(c, jobTypes, offset, limit) if err != nil { return nil, model.NewAppError("GetJobsByType", "app.job.get_all.app_error", nil, "", http.StatusInternalServerError).Wrap(err) } return jobs, nil } -func (a *App) CreateJob(job *model.Job) (*model.Job, *model.AppError) { - return a.Srv().Jobs.CreateJob(job.Type, job.Data) +func (a *App) CreateJob(c *request.Context, job *model.Job) (*model.Job, *model.AppError) { + return a.Srv().Jobs.CreateJob(c, job.Type, job.Data) } -func (a *App) CancelJob(jobId string) *model.AppError { - return a.Srv().Jobs.RequestCancellation(jobId) +func (a *App) CancelJob(c *request.Context, jobId string) *model.AppError { + return a.Srv().Jobs.RequestCancellation(c, jobId) } func (a *App) SessionHasPermissionToCreateJob(session model.Session, job *model.Job) (bool, *model.Permission) { diff --git a/server/channels/app/job_test.go b/server/channels/app/job_test.go index 03ac47c657..c7911a61f8 100644 --- a/server/channels/app/job_test.go +++ b/server/channels/app/job_test.go @@ -11,23 +11,27 @@ import ( "github.com/stretchr/testify/require" "github.com/mattermost/mattermost/server/public/model" + "github.com/mattermost/mattermost/server/public/shared/request" "github.com/mattermost/mattermost/server/v8/channels/store/sqlstore" ) func TestGetJob(t *testing.T) { th := Setup(t) defer th.TearDown() + ctx := request.EmptyContext(th.TestLogger) status := &model.Job{ Id: model.NewId(), Status: model.NewId(), } + status.InitLogger(th.TestLogger) + _, err := th.App.Srv().Store().Job().Save(status) require.NoError(t, err) defer 
th.App.Srv().Store().Job().Delete(status.Id) - received, appErr := th.App.GetJob(status.Id) + received, appErr := th.App.GetJob(ctx, status.Id) require.Nil(t, appErr) require.Equal(t, status, received, "incorrect job status received") } @@ -216,6 +220,7 @@ func TestSessionHasPermissionToReadJob(t *testing.T) { func TestGetJobByType(t *testing.T) { th := Setup(t) defer th.TearDown() + ctx := request.EmptyContext(th.TestLogger) jobType := model.NewId() @@ -238,18 +243,20 @@ func TestGetJobByType(t *testing.T) { } for _, status := range statuses { + status.InitLogger(th.TestLogger) + _, err := th.App.Srv().Store().Job().Save(status) require.NoError(t, err) defer th.App.Srv().Store().Job().Delete(status.Id) } - received, err := th.App.GetJobsByType(jobType, 0, 2) + received, err := th.App.GetJobsByType(ctx, jobType, 0, 2) require.Nil(t, err) require.Len(t, received, 2, "received wrong number of statuses") require.Equal(t, statuses[2], received[0], "should've received newest job first") require.Equal(t, statuses[0], received[1], "should've received second newest job second") - received, err = th.App.GetJobsByType(jobType, 2, 2) + received, err = th.App.GetJobsByType(ctx, jobType, 2, 2) require.Nil(t, err) require.Len(t, received, 1, "received wrong number of statuses") require.Equal(t, statuses[1], received[0], "should've received oldest job last") @@ -258,6 +265,7 @@ func TestGetJobByType(t *testing.T) { func TestGetJobsByTypes(t *testing.T) { th := Setup(t) defer th.TearDown() + ctx := request.EmptyContext(th.TestLogger) jobType := model.NewId() jobType1 := model.NewId() @@ -282,25 +290,27 @@ func TestGetJobsByTypes(t *testing.T) { } for _, status := range statuses { + status.InitLogger(th.TestLogger) + _, err := th.App.Srv().Store().Job().Save(status) require.NoError(t, err) defer th.App.Srv().Store().Job().Delete(status.Id) } jobTypes := []string{jobType, jobType1, jobType2} - received, err := th.App.GetJobsByTypes(jobTypes, 0, 2) + received, err := th.App.GetJobsByTypes(ctx, jobTypes, 0, 2) require.Nil(t, err) require.Len(t, received, 2, "received wrong number of jobs") require.Equal(t, statuses[2], received[0], "should've received newest job first") require.Equal(t, statuses[0], received[1], "should've received second newest job second") - received, err = th.App.GetJobsByTypes(jobTypes, 2, 2) + received, err = th.App.GetJobsByTypes(ctx, jobTypes, 2, 2) require.Nil(t, err) require.Len(t, received, 1, "received wrong number of jobs") require.Equal(t, statuses[1], received[0], "should've received oldest job last") jobTypes = []string{jobType1, jobType2} - received, err = th.App.GetJobsByTypes(jobTypes, 0, 3) + received, err = th.App.GetJobsByTypes(ctx, jobTypes, 0, 3) require.Nil(t, err) require.Len(t, received, 2, "received wrong number of jobs") require.Equal(t, statuses[2], received[0], "received wrong job type") diff --git a/server/channels/app/ldap.go b/server/channels/app/ldap.go index 9ad70447b8..74f27ce1fa 100644 --- a/server/channels/app/ldap.go +++ b/server/channels/app/ldap.go @@ -11,12 +11,13 @@ import ( "github.com/mattermost/mattermost/server/public/model" "github.com/mattermost/mattermost/server/public/shared/i18n" "github.com/mattermost/mattermost/server/public/shared/mlog" + "github.com/mattermost/mattermost/server/public/shared/request" ) // SyncLdap starts an LDAP sync job. // If includeRemovedMembers is true, then members who left or were removed from a team/channel will // be re-added; otherwise, they will not be re-added. 
-func (a *App) SyncLdap(includeRemovedMembers bool) { +func (a *App) SyncLdap(c *request.Context, includeRemovedMembers bool) { a.Srv().Go(func() { if license := a.Srv().License(); license != nil && *license.Features.LDAP { @@ -30,7 +31,7 @@ func (a *App) SyncLdap(includeRemovedMembers bool) { mlog.Error("Not executing ldap sync because ldap is not available") return } - ldapI.StartSynchronizeJob(false, includeRemovedMembers) + ldapI.StartSynchronizeJob(c, false, includeRemovedMembers) } }) } diff --git a/server/channels/app/migrations.go b/server/channels/app/migrations.go index 0ddf32f0b7..74860c84ef 100644 --- a/server/channels/app/migrations.go +++ b/server/channels/app/migrations.go @@ -11,6 +11,7 @@ import ( "github.com/mattermost/mattermost/server/public/model" "github.com/mattermost/mattermost/server/public/shared/mlog" + "github.com/mattermost/mattermost/server/public/shared/request" ) const EmojisPermissionsMigrationKey = "EmojisPermissionsMigrationComplete" @@ -554,7 +555,7 @@ func (s *Server) doPostPriorityConfigDefaultTrueMigration() { } } -func (s *Server) doElasticsearchFixChannelIndex() { +func (s *Server) doElasticsearchFixChannelIndex(c *request.Context) { // If the migration is already marked as completed, don't do it again. if _, err := s.Store().System().GetByName(model.MigrationKeyElasticsearchFixChannelIndex); err == nil { return @@ -566,13 +567,13 @@ func (s *Server) doElasticsearchFixChannelIndex() { return } - if _, appErr := s.Jobs.CreateJob(model.JobTypeElasticsearchFixChannelIndex, nil); appErr != nil { + if _, appErr := s.Jobs.CreateJob(c, model.JobTypeElasticsearchFixChannelIndex, nil); appErr != nil { mlog.Fatal("failed to start job for fixing Elasticsearch channels index", mlog.Err(appErr)) return } } -func (s *Server) doCloudS3PathMigrations() { +func (s *Server) doCloudS3PathMigrations(c *request.Context) { // This migration is only applicable for cloud environments if os.Getenv("MM_CLOUD_FILESTORE_BIFROST") == "" { return @@ -585,7 +586,7 @@ func (s *Server) doCloudS3PathMigrations() { // If there is a job already pending, no need to schedule again. // This is possible if the pod was rolled over. 
- jobs, err := s.Store().Job().GetAllByTypeAndStatus(model.JobTypeS3PathMigration, model.JobStatusPending) + jobs, err := s.Store().Job().GetAllByTypeAndStatus(c, model.JobTypeS3PathMigration, model.JobStatusPending) if err != nil { mlog.Fatal("failed to get jobs by type and status", mlog.Err(err)) return @@ -594,7 +595,7 @@ func (s *Server) doCloudS3PathMigrations() { return } - if _, appErr := s.Jobs.CreateJobOnce(model.JobTypeS3PathMigration, nil); appErr != nil { + if _, appErr := s.Jobs.CreateJobOnce(c, model.JobTypeS3PathMigration, nil); appErr != nil { mlog.Fatal("failed to start job for migrating s3 file paths", mlog.Err(appErr)) return } @@ -606,6 +607,8 @@ func (a *App) DoAppMigrations() { } func (s *Server) doAppMigrations() { + c := request.EmptyContext(s.Log()) + s.doAdvancedPermissionsMigration() s.doEmojisPermissionsMigration() s.doGuestRolesCreationMigration() @@ -622,6 +625,6 @@ func (s *Server) doAppMigrations() { s.doFirstAdminSetupCompleteMigration() s.doRemainingSchemaMigrations() s.doPostPriorityConfigDefaultTrueMigration() - s.doElasticsearchFixChannelIndex() - s.doCloudS3PathMigrations() + s.doElasticsearchFixChannelIndex(c) + s.doCloudS3PathMigrations(c) } diff --git a/server/channels/app/notify_admin_test.go b/server/channels/app/notify_admin_test.go index 018c76fa37..88271554e9 100644 --- a/server/channels/app/notify_admin_test.go +++ b/server/channels/app/notify_admin_test.go @@ -4,7 +4,6 @@ package app import ( - "context" "fmt" "os" "testing" @@ -22,10 +21,10 @@ func Test_SendNotifyAdminPosts(t *testing.T) { t.Run("no error sending non trial upgrade post when no notifications are available", func(t *testing.T) { th := Setup(t).InitBasic() defer th.TearDown() + ctx := request.EmptyContext(th.TestLogger) th.App.Srv().SetLicense(model.NewTestLicense("cloud")) - ctx := request.NewContext(context.Background(), model.NewId(), model.NewId(), model.NewId(), model.NewId(), model.NewId(), model.Session{}, nil) err := th.App.SendNotifyAdminPosts(ctx, "", "", false) require.Nil(t, err) }) @@ -33,10 +32,10 @@ func Test_SendNotifyAdminPosts(t *testing.T) { t.Run("no error sending trial upgrade post when no notifications are available", func(t *testing.T) { th := Setup(t).InitBasic() defer th.TearDown() + ctx := request.EmptyContext(th.TestLogger) th.App.Srv().SetLicense(model.NewTestLicense("cloud")) - ctx := request.NewContext(context.Background(), model.NewId(), model.NewId(), model.NewId(), model.NewId(), model.NewId(), model.Session{}, nil) err := th.App.SendNotifyAdminPosts(ctx, "", "", true) require.Nil(t, err) }) @@ -44,6 +43,7 @@ func Test_SendNotifyAdminPosts(t *testing.T) { t.Run("successfully send upgrade notification", func(t *testing.T) { th := Setup(t).InitBasic() defer th.TearDown() + ctx := request.EmptyContext(th.TestLogger) th.App.Srv().SetLicense(model.NewTestLicense("cloud")) @@ -62,7 +62,6 @@ func Test_SendNotifyAdminPosts(t *testing.T) { }) require.Nil(t, appErr) - ctx := request.NewContext(context.Background(), model.NewId(), model.NewId(), model.NewId(), model.NewId(), model.NewId(), model.Session{}, nil) appErr = th.App.SendNotifyAdminPosts(ctx, "test", "", false) require.Nil(t, appErr) @@ -98,6 +97,7 @@ func Test_SendNotifyAdminPosts(t *testing.T) { t.Run("successfully send trial upgrade notification", func(t *testing.T) { th := Setup(t).InitBasic() defer th.TearDown() + ctx := request.EmptyContext(th.TestLogger) th.App.Srv().SetLicense(model.NewTestLicense("cloud")) @@ -110,7 +110,6 @@ func Test_SendNotifyAdminPosts(t *testing.T) { }) 
require.Nil(t, appErr) - ctx := request.NewContext(context.Background(), model.NewId(), model.NewId(), model.NewId(), model.NewId(), model.NewId(), model.Session{}, nil) appErr = th.App.SendNotifyAdminPosts(ctx, "test", "", true) require.Nil(t, appErr) @@ -146,6 +145,7 @@ func Test_SendNotifyAdminPosts(t *testing.T) { t.Run("successfully send install plugin notification", func(t *testing.T) { th := Setup(t).InitBasic() defer th.TearDown() + ctx := request.EmptyContext(th.TestLogger) // some notifications _, appErr := th.App.SaveAdminNotifyData(&model.NotifyAdminData{ @@ -156,7 +156,6 @@ func Test_SendNotifyAdminPosts(t *testing.T) { }) require.Nil(t, appErr) - ctx := request.NewContext(context.Background(), model.NewId(), model.NewId(), model.NewId(), model.NewId(), model.NewId(), model.Session{}, nil) appErr = th.App.SendNotifyAdminPosts(ctx, "", "", false) require.Nil(t, appErr) @@ -191,6 +190,7 @@ func Test_SendNotifyAdminPosts(t *testing.T) { t.Run("persist notify admin data after sending the install plugin notification", func(t *testing.T) { th := Setup(t).InitBasic() defer th.TearDown() + ctx := request.EmptyContext(th.TestLogger) // some notifications _, appErr := th.App.SaveAdminNotifyData(&model.NotifyAdminData{ @@ -201,7 +201,6 @@ func Test_SendNotifyAdminPosts(t *testing.T) { }) require.Nil(t, appErr) - ctx := request.NewContext(context.Background(), model.NewId(), model.NewId(), model.NewId(), model.NewId(), model.NewId(), model.Session{}, nil) appErr = th.App.SendNotifyAdminPosts(ctx, "", "", false) require.Nil(t, appErr) @@ -261,6 +260,7 @@ func Test_SendNotifyAdminPosts(t *testing.T) { t.Run("error when trying to send upgrade post before end of cool off period", func(t *testing.T) { th := Setup(t).InitBasic() defer th.TearDown() + ctx := request.EmptyContext(th.TestLogger) th.App.Srv().SetLicense(model.NewTestLicense("cloud")) @@ -272,7 +272,6 @@ func Test_SendNotifyAdminPosts(t *testing.T) { }) require.Nil(t, appErr) - ctx := request.NewContext(context.Background(), model.NewId(), model.NewId(), model.NewId(), model.NewId(), model.NewId(), model.Session{}, nil) appErr = th.App.SendNotifyAdminPosts(ctx, "", "", false) require.Nil(t, appErr) @@ -293,6 +292,7 @@ func Test_SendNotifyAdminPosts(t *testing.T) { t.Run("can send upgrade post at the end of cool off period", func(t *testing.T) { th := Setup(t).InitBasic() defer th.TearDown() + ctx := request.EmptyContext(th.TestLogger) th.App.Srv().SetLicense(model.NewTestLicense("cloud")) @@ -307,7 +307,6 @@ func Test_SendNotifyAdminPosts(t *testing.T) { }) require.Nil(t, appErr) - ctx := request.NewContext(context.Background(), model.NewId(), model.NewId(), model.NewId(), model.NewId(), model.NewId(), model.Session{}, nil) appErr = th.App.SendNotifyAdminPosts(ctx, "", "", false) require.Nil(t, appErr) @@ -329,6 +328,7 @@ func Test_SendNotifyAdminPosts(t *testing.T) { t.Run("can filter notifications when plan changes within cool off period", func(t *testing.T) { th := Setup(t).InitBasic() defer th.TearDown() + ctx := request.EmptyContext(th.TestLogger) th.App.Srv().SetLicense(model.NewTestLicense("cloud")) @@ -349,7 +349,6 @@ func Test_SendNotifyAdminPosts(t *testing.T) { }) require.Nil(t, appErr) - ctx := request.NewContext(context.Background(), model.NewId(), model.NewId(), model.NewId(), model.NewId(), model.NewId(), model.Session{}, nil) appErr = th.App.SendNotifyAdminPosts(ctx, "test", model.LicenseShortSkuProfessional, false) // try and send notification but workspace currentSKU has since changed to cloud-professional 
require.Nil(t, appErr) @@ -385,13 +384,13 @@ func Test_SendNotifyAdminPosts(t *testing.T) { t.Run("correctly send upgrade and install plugin post with the correct user request", func(t *testing.T) { th := Setup(t).InitBasic() defer th.TearDown() + ctx := request.EmptyContext(th.TestLogger) + os.Setenv("MM_NOTIFY_ADMIN_COOL_OFF_DAYS", "0") defer os.Unsetenv("MM_NOTIFY_ADMIN_COOL_OFF_DAYS") th.App.Srv().SetLicense(model.NewTestLicense("cloud")) - ctx := request.NewContext(context.Background(), model.NewId(), model.NewId(), model.NewId(), model.NewId(), model.NewId(), model.Session{}, nil) - // some notifications _, appErr := th.App.SaveAdminNotifyData(&model.NotifyAdminData{ UserId: th.BasicUser.Id, diff --git a/server/channels/app/opentracing/opentracing_layer.go b/server/channels/app/opentracing/opentracing_layer.go index 25b424533f..a54bd8c152 100644 --- a/server/channels/app/opentracing/opentracing_layer.go +++ b/server/channels/app/opentracing/opentracing_layer.go @@ -1075,7 +1075,7 @@ func (a *OpenTracingAppLayer) CanNotifyAdmin(trial bool) bool { return resultVar0 } -func (a *OpenTracingAppLayer) CancelJob(jobId string) *model.AppError { +func (a *OpenTracingAppLayer) CancelJob(c *request.Context, jobId string) *model.AppError { origCtx := a.ctx span, newCtx := tracing.StartSpanWithParentByContext(a.ctx, "app.CancelJob") @@ -1087,7 +1087,7 @@ func (a *OpenTracingAppLayer) CancelJob(jobId string) *model.AppError { }() defer span.Finish() - resultVar0 := a.app.CancelJob(jobId) + resultVar0 := a.app.CancelJob(c, jobId) if resultVar0 != nil { span.LogFields(spanlog.Error(resultVar0)) @@ -2175,7 +2175,7 @@ func (a *OpenTracingAppLayer) CreateIncomingWebhookForChannel(creatorId string, return resultVar0, resultVar1 } -func (a *OpenTracingAppLayer) CreateJob(job *model.Job) (*model.Job, *model.AppError) { +func (a *OpenTracingAppLayer) CreateJob(c *request.Context, job *model.Job) (*model.Job, *model.AppError) { origCtx := a.ctx span, newCtx := tracing.StartSpanWithParentByContext(a.ctx, "app.CreateJob") @@ -2187,7 +2187,7 @@ func (a *OpenTracingAppLayer) CreateJob(job *model.Job) (*model.Job, *model.AppE }() defer span.Finish() - resultVar0, resultVar1 := a.app.CreateJob(job) + resultVar0, resultVar1 := a.app.CreateJob(c, job) if resultVar1 != nil { span.LogFields(spanlog.Error(resultVar1)) @@ -4623,7 +4623,7 @@ func (a *OpenTracingAppLayer) GeneratePublicLink(siteURL string, info *model.Fil return resultVar0 } -func (a *OpenTracingAppLayer) GenerateSupportPacket() []model.FileData { +func (a *OpenTracingAppLayer) GenerateSupportPacket(c *request.Context) []model.FileData { origCtx := a.ctx span, newCtx := tracing.StartSpanWithParentByContext(a.ctx, "app.GenerateSupportPacket") @@ -4635,7 +4635,7 @@ func (a *OpenTracingAppLayer) GenerateSupportPacket() []model.FileData { }() defer span.Finish() - resultVar0 := a.app.GenerateSupportPacket() + resultVar0 := a.app.GenerateSupportPacket(c) return resultVar0 } @@ -6991,7 +6991,7 @@ func (a *OpenTracingAppLayer) GetIncomingWebhooksPageByUser(userID string, page return resultVar0, resultVar1 } -func (a *OpenTracingAppLayer) GetJob(id string) (*model.Job, *model.AppError) { +func (a *OpenTracingAppLayer) GetJob(c *request.Context, id string) (*model.Job, *model.AppError) { origCtx := a.ctx span, newCtx := tracing.StartSpanWithParentByContext(a.ctx, "app.GetJob") @@ -7003,7 +7003,7 @@ func (a *OpenTracingAppLayer) GetJob(id string) (*model.Job, *model.AppError) { }() defer span.Finish() - resultVar0, resultVar1 := a.app.GetJob(id) + resultVar0, 
resultVar1 := a.app.GetJob(c, id) if resultVar1 != nil { span.LogFields(spanlog.Error(resultVar1)) @@ -7013,29 +7013,7 @@ func (a *OpenTracingAppLayer) GetJob(id string) (*model.Job, *model.AppError) { return resultVar0, resultVar1 } -func (a *OpenTracingAppLayer) GetJobs(offset int, limit int) ([]*model.Job, *model.AppError) { - origCtx := a.ctx - span, newCtx := tracing.StartSpanWithParentByContext(a.ctx, "app.GetJobs") - - a.ctx = newCtx - a.app.Srv().Store().SetContext(newCtx) - defer func() { - a.app.Srv().Store().SetContext(origCtx) - a.ctx = origCtx - }() - - defer span.Finish() - resultVar0, resultVar1 := a.app.GetJobs(offset, limit) - - if resultVar1 != nil { - span.LogFields(spanlog.Error(resultVar1)) - ext.Error.Set(span, true) - } - - return resultVar0, resultVar1 -} - -func (a *OpenTracingAppLayer) GetJobsByType(jobType string, offset int, limit int) ([]*model.Job, *model.AppError) { +func (a *OpenTracingAppLayer) GetJobsByType(c *request.Context, jobType string, offset int, limit int) ([]*model.Job, *model.AppError) { origCtx := a.ctx span, newCtx := tracing.StartSpanWithParentByContext(a.ctx, "app.GetJobsByType") @@ -7047,7 +7025,7 @@ func (a *OpenTracingAppLayer) GetJobsByType(jobType string, offset int, limit in }() defer span.Finish() - resultVar0, resultVar1 := a.app.GetJobsByType(jobType, offset, limit) + resultVar0, resultVar1 := a.app.GetJobsByType(c, jobType, offset, limit) if resultVar1 != nil { span.LogFields(spanlog.Error(resultVar1)) @@ -7057,7 +7035,7 @@ func (a *OpenTracingAppLayer) GetJobsByType(jobType string, offset int, limit in return resultVar0, resultVar1 } -func (a *OpenTracingAppLayer) GetJobsByTypePage(jobType string, page int, perPage int) ([]*model.Job, *model.AppError) { +func (a *OpenTracingAppLayer) GetJobsByTypePage(c *request.Context, jobType string, page int, perPage int) ([]*model.Job, *model.AppError) { origCtx := a.ctx span, newCtx := tracing.StartSpanWithParentByContext(a.ctx, "app.GetJobsByTypePage") @@ -7069,7 +7047,7 @@ func (a *OpenTracingAppLayer) GetJobsByTypePage(jobType string, page int, perPag }() defer span.Finish() - resultVar0, resultVar1 := a.app.GetJobsByTypePage(jobType, page, perPage) + resultVar0, resultVar1 := a.app.GetJobsByTypePage(c, jobType, page, perPage) if resultVar1 != nil { span.LogFields(spanlog.Error(resultVar1)) @@ -7079,7 +7057,7 @@ func (a *OpenTracingAppLayer) GetJobsByTypePage(jobType string, page int, perPag return resultVar0, resultVar1 } -func (a *OpenTracingAppLayer) GetJobsByTypes(jobTypes []string, offset int, limit int) ([]*model.Job, *model.AppError) { +func (a *OpenTracingAppLayer) GetJobsByTypes(c *request.Context, jobTypes []string, offset int, limit int) ([]*model.Job, *model.AppError) { origCtx := a.ctx span, newCtx := tracing.StartSpanWithParentByContext(a.ctx, "app.GetJobsByTypes") @@ -7091,7 +7069,7 @@ func (a *OpenTracingAppLayer) GetJobsByTypes(jobTypes []string, offset int, limi }() defer span.Finish() - resultVar0, resultVar1 := a.app.GetJobsByTypes(jobTypes, offset, limit) + resultVar0, resultVar1 := a.app.GetJobsByTypes(c, jobTypes, offset, limit) if resultVar1 != nil { span.LogFields(spanlog.Error(resultVar1)) @@ -7101,7 +7079,7 @@ func (a *OpenTracingAppLayer) GetJobsByTypes(jobTypes []string, offset int, limi return resultVar0, resultVar1 } -func (a *OpenTracingAppLayer) GetJobsByTypesPage(jobType []string, page int, perPage int) ([]*model.Job, *model.AppError) { +func (a *OpenTracingAppLayer) GetJobsByTypesPage(c *request.Context, jobType []string, page int, perPage int) 
([]*model.Job, *model.AppError) { origCtx := a.ctx span, newCtx := tracing.StartSpanWithParentByContext(a.ctx, "app.GetJobsByTypesPage") @@ -7113,29 +7091,7 @@ func (a *OpenTracingAppLayer) GetJobsByTypesPage(jobType []string, page int, per }() defer span.Finish() - resultVar0, resultVar1 := a.app.GetJobsByTypesPage(jobType, page, perPage) - - if resultVar1 != nil { - span.LogFields(spanlog.Error(resultVar1)) - ext.Error.Set(span, true) - } - - return resultVar0, resultVar1 -} - -func (a *OpenTracingAppLayer) GetJobsPage(page int, perPage int) ([]*model.Job, *model.AppError) { - origCtx := a.ctx - span, newCtx := tracing.StartSpanWithParentByContext(a.ctx, "app.GetJobsPage") - - a.ctx = newCtx - a.app.Srv().Store().SetContext(newCtx) - defer func() { - a.app.Srv().Store().SetContext(origCtx) - a.ctx = origCtx - }() - - defer span.Finish() - resultVar0, resultVar1 := a.app.GetJobsPage(page, perPage) + resultVar0, resultVar1 := a.app.GetJobsByTypesPage(c, jobType, page, perPage) if resultVar1 != nil { span.LogFields(spanlog.Error(resultVar1)) @@ -11708,7 +11664,7 @@ func (a *OpenTracingAppLayer) InstallPlugin(pluginFile io.ReadSeeker, replace bo return resultVar0, resultVar1 } -func (a *OpenTracingAppLayer) InvalidateAllEmailInvites() *model.AppError { +func (a *OpenTracingAppLayer) InvalidateAllEmailInvites(c *request.Context) *model.AppError { origCtx := a.ctx span, newCtx := tracing.StartSpanWithParentByContext(a.ctx, "app.InvalidateAllEmailInvites") @@ -11720,7 +11676,7 @@ func (a *OpenTracingAppLayer) InvalidateAllEmailInvites() *model.AppError { }() defer span.Finish() - resultVar0 := a.app.InvalidateAllEmailInvites() + resultVar0 := a.app.InvalidateAllEmailInvites(c) if resultVar0 != nil { span.LogFields(spanlog.Error(resultVar0)) @@ -11730,7 +11686,7 @@ func (a *OpenTracingAppLayer) InvalidateAllEmailInvites() *model.AppError { return resultVar0 } -func (a *OpenTracingAppLayer) InvalidateAllResendInviteEmailJobs() *model.AppError { +func (a *OpenTracingAppLayer) InvalidateAllResendInviteEmailJobs(c *request.Context) *model.AppError { origCtx := a.ctx span, newCtx := tracing.StartSpanWithParentByContext(a.ctx, "app.InvalidateAllResendInviteEmailJobs") @@ -11742,7 +11698,7 @@ func (a *OpenTracingAppLayer) InvalidateAllResendInviteEmailJobs() *model.AppErr }() defer span.Finish() - resultVar0 := a.app.InvalidateAllResendInviteEmailJobs() + resultVar0 := a.app.InvalidateAllResendInviteEmailJobs(c) if resultVar0 != nil { span.LogFields(spanlog.Error(resultVar0)) @@ -16736,7 +16692,7 @@ func (a *OpenTracingAppLayer) SwitchOAuthToEmail(email string, password string, return resultVar0, resultVar1 } -func (a *OpenTracingAppLayer) SyncLdap(includeRemovedMembers bool) { +func (a *OpenTracingAppLayer) SyncLdap(c *request.Context, includeRemovedMembers bool) { origCtx := a.ctx span, newCtx := tracing.StartSpanWithParentByContext(a.ctx, "app.SyncLdap") @@ -16748,7 +16704,7 @@ func (a *OpenTracingAppLayer) SyncLdap(includeRemovedMembers bool) { }() defer span.Finish() - a.app.SyncLdap(includeRemovedMembers) + a.app.SyncLdap(c, includeRemovedMembers) } func (a *OpenTracingAppLayer) SyncPlugins() *model.AppError { diff --git a/server/channels/app/plugin_hooks_test.go b/server/channels/app/plugin_hooks_test.go index 9c5321f0d1..1e96a45e5f 100644 --- a/server/channels/app/plugin_hooks_test.go +++ b/server/channels/app/plugin_hooks_test.go @@ -5,7 +5,6 @@ package app import ( "bytes" - "context" _ "embed" "fmt" "io" @@ -944,10 +943,9 @@ func TestErrorString(t *testing.T) { func TestHookContext(t 
*testing.T) { th := Setup(t).InitBasic() defer th.TearDown() + ctx := request.EmptyContext(th.TestLogger) // We don't actually have a session, we are faking it so just set something arbitrarily - ctx := request.NewContext(context.Background(), model.NewId(), model.NewId(), model.NewId(), model.NewId(), model.NewId(), model.Session{}, nil) - ctx.SetLogger(th.TestLogger) ctx.Session().Id = model.NewId() var mockAPI plugintest.API diff --git a/server/channels/app/server.go b/server/channels/app/server.go index e340c1413a..d59447997f 100644 --- a/server/channels/app/server.go +++ b/server/channels/app/server.go @@ -1487,7 +1487,7 @@ func (ch *Channels) ClientConfigHash() string { } func (s *Server) initJobs() { - s.Jobs = jobs.NewJobServer(s.platform, s.Store(), s.GetMetrics()) + s.Jobs = jobs.NewJobServer(s.platform, s.Store(), s.GetMetrics(), s.Log()) if jobsDataRetentionJobInterface != nil { builder := jobsDataRetentionJobInterface(s) diff --git a/server/channels/app/support_packet.go b/server/channels/app/support_packet.go index f75e9b2554..90bd7a5631 100644 --- a/server/channels/app/support_packet.go +++ b/server/channels/app/support_packet.go @@ -15,10 +15,11 @@ import ( "github.com/mattermost/mattermost/server/public/model" "github.com/mattermost/mattermost/server/public/shared/mlog" + "github.com/mattermost/mattermost/server/public/shared/request" "github.com/mattermost/mattermost/server/v8/config" ) -func (a *App) GenerateSupportPacket() []model.FileData { +func (a *App) GenerateSupportPacket(c *request.Context) []model.FileData { // If any errors we come across within this function, we will log it in a warning.txt file so that we know why certain files did not get produced if any var warnings []string @@ -26,7 +27,7 @@ func (a *App) GenerateSupportPacket() []model.FileData { fileDatas := []model.FileData{} // A array of the functions that we can iterate through since they all have the same return value - functions := map[string]func() (*model.FileData, error){ + functions := map[string]func(c *request.Context) (*model.FileData, error){ "support package": a.generateSupportPacketYaml, "plugins": a.createPluginsFile, "config": a.createSanitizedConfigFile, @@ -35,7 +36,7 @@ func (a *App) GenerateSupportPacket() []model.FileData { } for name, fn := range functions { - fileData, err := fn() + fileData, err := fn(c) if err != nil { mlog.Error("Failed to generate file for support package", mlog.Err(err), mlog.String("file", name)) warnings = append(warnings, err.Error()) @@ -56,7 +57,7 @@ func (a *App) GenerateSupportPacket() []model.FileData { return fileDatas } -func (a *App) generateSupportPacketYaml() (*model.FileData, error) { +func (a *App) generateSupportPacketYaml(c *request.Context) (*model.FileData, error) { var rErr error /* DB */ @@ -112,31 +113,31 @@ func (a *App) generateSupportPacketYaml() (*model.FileData, error) { rErr = multierror.Append(errors.Wrap(err, "error while getting user count")) } - dataRetentionJobs, err := a.Srv().Store().Job().GetAllByTypePage(model.JobTypeDataRetention, 0, 2) + dataRetentionJobs, err := a.Srv().Store().Job().GetAllByTypePage(c, model.JobTypeDataRetention, 0, 2) if err != nil { rErr = multierror.Append(errors.Wrap(err, "error while getting data retention jobs")) } - messageExportJobs, err := a.Srv().Store().Job().GetAllByTypePage(model.JobTypeMessageExport, 0, 2) + messageExportJobs, err := a.Srv().Store().Job().GetAllByTypePage(c, model.JobTypeMessageExport, 0, 2) if err != nil { rErr = multierror.Append(errors.Wrap(err, "error while getting 
message export jobs")) } - elasticPostIndexingJobs, err := a.Srv().Store().Job().GetAllByTypePage(model.JobTypeElasticsearchPostIndexing, 0, 2) + elasticPostIndexingJobs, err := a.Srv().Store().Job().GetAllByTypePage(c, model.JobTypeElasticsearchPostIndexing, 0, 2) if err != nil { rErr = multierror.Append(errors.Wrap(err, "error while getting ES post indexing jobs")) } - elasticPostAggregationJobs, _ := a.Srv().Store().Job().GetAllByTypePage(model.JobTypeElasticsearchPostAggregation, 0, 2) + elasticPostAggregationJobs, _ := a.Srv().Store().Job().GetAllByTypePage(c, model.JobTypeElasticsearchPostAggregation, 0, 2) if err != nil { rErr = multierror.Append(errors.Wrap(err, "error while getting ES post aggregation jobs")) } - blevePostIndexingJobs, _ := a.Srv().Store().Job().GetAllByTypePage(model.JobTypeBlevePostIndexing, 0, 2) + blevePostIndexingJobs, _ := a.Srv().Store().Job().GetAllByTypePage(c, model.JobTypeBlevePostIndexing, 0, 2) if err != nil { rErr = multierror.Append(errors.Wrap(err, "error while getting bleve post indexing jobs")) } - ldapSyncJobs, err := a.Srv().Store().Job().GetAllByTypePage(model.JobTypeLdapSync, 0, 2) + ldapSyncJobs, err := a.Srv().Store().Job().GetAllByTypePage(c, model.JobTypeLdapSync, 0, 2) if err != nil { rErr = multierror.Append(errors.Wrap(err, "error while getting LDAP sync jobs")) } - migrationJobs, err := a.Srv().Store().Job().GetAllByTypePage(model.JobTypeMigrations, 0, 2) + migrationJobs, err := a.Srv().Store().Job().GetAllByTypePage(c, model.JobTypeMigrations, 0, 2) if err != nil { rErr = multierror.Append(errors.Wrap(err, "error while getting migration jobs")) } @@ -219,7 +220,8 @@ func (a *App) generateSupportPacketYaml() (*model.FileData, error) { return fileData, rErr } -func (a *App) createPluginsFile() (*model.FileData, error) { +func (a *App) createPluginsFile(_ *request.Context) (*model.FileData, error) { + // Getting the plugins installed on the server, prettify it, and then add them to the file data array pluginsResponse, appErr := a.GetPlugins() if appErr != nil { @@ -239,7 +241,7 @@ func (a *App) createPluginsFile() (*model.FileData, error) { } -func (a *App) getNotificationsLog() (*model.FileData, error) { +func (a *App) getNotificationsLog(_ *request.Context) (*model.FileData, error) { if !*a.Config().NotificationLogSettings.EnableFile { return nil, errors.New("Unable to retrieve notifications.log because LogSettings: EnableFile is set to false") } @@ -257,7 +259,7 @@ func (a *App) getNotificationsLog() (*model.FileData, error) { return fileData, nil } -func (a *App) getMattermostLog() (*model.FileData, error) { +func (a *App) getMattermostLog(_ *request.Context) (*model.FileData, error) { if !*a.Config().LogSettings.EnableFile { return nil, errors.New("Unable to retrieve mattermost.log because LogSettings: EnableFile is set to false") } @@ -275,7 +277,7 @@ func (a *App) getMattermostLog() (*model.FileData, error) { return fileData, nil } -func (a *App) createSanitizedConfigFile() (*model.FileData, error) { +func (a *App) createSanitizedConfigFile(_ *request.Context) (*model.FileData, error) { // Getting sanitized config, prettifying it, and then adding it to our file data array sanitizedConfigPrettyJSON, err := json.MarshalIndent(a.GetSanitizedConfig(), "", " ") if err != nil { diff --git a/server/channels/app/support_packet_test.go b/server/channels/app/support_packet_test.go index fdcaf170d3..306e570aa4 100644 --- a/server/channels/app/support_packet_test.go +++ b/server/channels/app/support_packet_test.go @@ -13,6 +13,7 @@ import ( 
"gopkg.in/yaml.v2" "github.com/mattermost/mattermost/server/public/model" + "github.com/mattermost/mattermost/server/public/shared/request" "github.com/mattermost/mattermost/server/v8/channels/app/platform" fmocks "github.com/mattermost/mattermost/server/v8/platform/shared/filestore/mocks" ) @@ -20,9 +21,10 @@ import ( func TestCreatePluginsFile(t *testing.T) { th := Setup(t) defer th.TearDown() + ctx := request.EmptyContext(th.TestLogger) // Happy path where we have a plugins file with no err - fileData, err := th.App.createPluginsFile() + fileData, err := th.App.createPluginsFile(ctx) require.NotNil(t, fileData) assert.Equal(t, "plugins.json", fileData.Filename) assert.Positive(t, len(fileData.Body)) @@ -34,7 +36,7 @@ func TestCreatePluginsFile(t *testing.T) { }) // Plugins off in settings so no fileData and we get a warning instead - fileData, err = th.App.createPluginsFile() + fileData, err = th.App.createPluginsFile(ctx) assert.Nil(t, fileData) assert.ErrorContains(t, err, "failed to get plugin list for support package") } @@ -42,6 +44,7 @@ func TestCreatePluginsFile(t *testing.T) { func TestGenerateSupportPacketYaml(t *testing.T) { th := Setup(t).InitBasic() defer th.TearDown() + ctx := request.EmptyContext(th.TestLogger) licenseUsers := 100 license := model.NewTestLicense() @@ -51,7 +54,7 @@ func TestGenerateSupportPacketYaml(t *testing.T) { t.Run("Happy path", func(t *testing.T) { // Happy path where we have a support packet yaml file without any warnings - fileData, err := th.App.generateSupportPacketYaml() + fileData, err := th.App.generateSupportPacketYaml(ctx) require.NotNil(t, fileData) assert.Equal(t, "support_packet.yaml", fileData.Filename) assert.Positive(t, len(fileData.Body)) @@ -72,7 +75,7 @@ func TestGenerateSupportPacketYaml(t *testing.T) { fb.On("DriverName").Return("mock") fb.On("TestConnection").Return(errors.New("all broken")) - fileData, err := th.App.generateSupportPacketYaml() + fileData, err := th.App.generateSupportPacketYaml(ctx) require.NotNil(t, fileData) assert.Equal(t, "support_packet.yaml", fileData.Filename) assert.Positive(t, len(fileData.Body)) @@ -89,6 +92,7 @@ func TestGenerateSupportPacketYaml(t *testing.T) { func TestGenerateSupportPacket(t *testing.T) { th := Setup(t) defer th.TearDown() + ctx := request.EmptyContext(th.TestLogger) d1 := []byte("hello\ngo\n") err := os.WriteFile("mattermost.log", d1, 0777) @@ -96,7 +100,7 @@ func TestGenerateSupportPacket(t *testing.T) { err = os.WriteFile("notifications.log", d1, 0777) require.NoError(t, err) - fileDatas := th.App.GenerateSupportPacket() + fileDatas := th.App.GenerateSupportPacket(ctx) var rFileNames []string testFiles := []string{"support_packet.yaml", "plugins.json", "sanitized_config.json", "mattermost.log", "notifications.log"} for _, fileData := range fileDatas { @@ -112,7 +116,7 @@ func TestGenerateSupportPacket(t *testing.T) { require.NoError(t, err) err = os.Remove("mattermost.log") require.NoError(t, err) - fileDatas = th.App.GenerateSupportPacket() + fileDatas = th.App.GenerateSupportPacket(ctx) testFiles = []string{"support_packet.yaml", "plugins.json", "sanitized_config.json", "warning.txt"} rFileNames = nil for _, fileData := range fileDatas { @@ -127,13 +131,14 @@ func TestGenerateSupportPacket(t *testing.T) { func TestGetNotificationsLog(t *testing.T) { th := Setup(t) defer th.TearDown() + ctx := request.EmptyContext(th.TestLogger) // Disable notifications file to get an error th.App.UpdateConfig(func(cfg *model.Config) { *cfg.NotificationLogSettings.EnableFile = false }) - 
fileData, err := th.App.getNotificationsLog() + fileData, err := th.App.getNotificationsLog(ctx) assert.Nil(t, fileData) assert.ErrorContains(t, err, "Unable to retrieve notifications.log because LogSettings: EnableFile is set to false") @@ -145,7 +150,7 @@ func TestGetNotificationsLog(t *testing.T) { // If any previous notifications.log file, lets delete it os.Remove("notifications.log") - fileData, err = th.App.getNotificationsLog() + fileData, err = th.App.getNotificationsLog(ctx) assert.Nil(t, fileData) assert.ErrorContains(t, err, "failed read notifcation log file at path") @@ -155,7 +160,7 @@ func TestGetNotificationsLog(t *testing.T) { defer os.Remove("notifications.log") require.NoError(t, err) - fileData, err = th.App.getNotificationsLog() + fileData, err = th.App.getNotificationsLog(ctx) require.NotNil(t, fileData) assert.Equal(t, "notifications.log", fileData.Filename) assert.Positive(t, len(fileData.Body)) @@ -165,13 +170,14 @@ func TestGetNotificationsLog(t *testing.T) { func TestGetMattermostLog(t *testing.T) { th := Setup(t) defer th.TearDown() + ctx := request.EmptyContext(th.TestLogger) // disable mattermost log file setting in config so we should get an warning th.App.UpdateConfig(func(cfg *model.Config) { *cfg.LogSettings.EnableFile = false }) - fileData, err := th.App.getMattermostLog() + fileData, err := th.App.getMattermostLog(ctx) assert.Nil(t, fileData) assert.ErrorContains(t, err, "Unable to retrieve mattermost.log because LogSettings: EnableFile is set to false") @@ -183,7 +189,7 @@ func TestGetMattermostLog(t *testing.T) { // If any previous mattermost.log file, lets delete it os.Remove("mattermost.log") - fileData, err = th.App.getMattermostLog() + fileData, err = th.App.getMattermostLog(ctx) assert.Nil(t, fileData) assert.ErrorContains(t, err, "failed read mattermost log file at path mattermost.log") @@ -193,7 +199,7 @@ func TestGetMattermostLog(t *testing.T) { defer os.Remove("mattermost.log") require.NoError(t, err) - fileData, err = th.App.getMattermostLog() + fileData, err = th.App.getMattermostLog(ctx) require.NotNil(t, fileData) assert.Equal(t, "mattermost.log", fileData.Filename) assert.Positive(t, len(fileData.Body)) @@ -203,9 +209,10 @@ func TestGetMattermostLog(t *testing.T) { func TestCreateSanitizedConfigFile(t *testing.T) { th := Setup(t) defer th.TearDown() + ctx := request.EmptyContext(th.TestLogger) // Happy path where we have a sanitized config file with no err - fileData, err := th.App.createSanitizedConfigFile() + fileData, err := th.App.createSanitizedConfigFile(ctx) require.NotNil(t, fileData) assert.Equal(t, "sanitized_config.json", fileData.Filename) assert.Positive(t, len(fileData.Body)) diff --git a/server/channels/app/team.go b/server/channels/app/team.go index 9521e96b12..449acb2b7b 100644 --- a/server/channels/app/team.go +++ b/server/channels/app/team.go @@ -2057,21 +2057,21 @@ func (a *App) RemoveTeamIcon(teamID string) *model.AppError { return nil } -func (a *App) InvalidateAllEmailInvites() *model.AppError { +func (a *App) InvalidateAllEmailInvites(c *request.Context) *model.AppError { if err := a.Srv().Store().Token().RemoveAllTokensByType(TokenTypeTeamInvitation); err != nil { return model.NewAppError("InvalidateAllEmailInvites", "api.team.invalidate_all_email_invites.app_error", nil, "", http.StatusInternalServerError).Wrap(err) } if err := a.Srv().Store().Token().RemoveAllTokensByType(TokenTypeGuestInvitation); err != nil { return model.NewAppError("InvalidateAllEmailInvites", 
"api.team.invalidate_all_email_invites.app_error", nil, "", http.StatusInternalServerError).Wrap(err) } - if err := a.InvalidateAllResendInviteEmailJobs(); err != nil { + if err := a.InvalidateAllResendInviteEmailJobs(c); err != nil { return model.NewAppError("InvalidateAllEmailInvites", "api.team.invalidate_all_email_invites.app_error", nil, "", http.StatusInternalServerError).Wrap(err) } return nil } -func (a *App) InvalidateAllResendInviteEmailJobs() *model.AppError { - jobs, appErr := a.Srv().Jobs.GetJobsByTypeAndStatus(model.JobTypeResendInvitationEmail, model.JobStatusPending) +func (a *App) InvalidateAllResendInviteEmailJobs(c *request.Context) *model.AppError { + jobs, appErr := a.Srv().Jobs.GetJobsByTypeAndStatus(c, model.JobTypeResendInvitationEmail, model.JobStatusPending) if appErr != nil { return appErr } diff --git a/server/channels/app/team_test.go b/server/channels/app/team_test.go index 33478752f0..e869e03f9f 100644 --- a/server/channels/app/team_test.go +++ b/server/channels/app/team_test.go @@ -19,6 +19,7 @@ import ( "github.com/stretchr/testify/require" "github.com/mattermost/mattermost/server/public/model" + "github.com/mattermost/mattermost/server/public/shared/request" "github.com/mattermost/mattermost/server/v8/channels/app/email" emailmocks "github.com/mattermost/mattermost/server/v8/channels/app/email/mocks" "github.com/mattermost/mattermost/server/v8/channels/app/teams" @@ -1357,18 +1358,19 @@ func TestUpdateTeamMemberRolesChangingGuest(t *testing.T) { func TestInvalidateAllResendInviteEmailJobs(t *testing.T) { th := Setup(t) defer th.TearDown() + ctx := request.EmptyContext(th.TestLogger) - job, err := th.App.Srv().Jobs.CreateJob(model.JobTypeResendInvitationEmail, map[string]string{}) + job, err := th.App.Srv().Jobs.CreateJob(ctx, model.JobTypeResendInvitationEmail, map[string]string{}) require.Nil(t, err) sysVar := &model.System{Name: job.Id, Value: "0"} e := th.App.Srv().Store().System().SaveOrUpdate(sysVar) require.NoError(t, e) - appErr := th.App.InvalidateAllResendInviteEmailJobs() + appErr := th.App.InvalidateAllResendInviteEmailJobs(ctx) require.Nil(t, appErr) - j, e := th.App.Srv().Store().Job().Get(job.Id) + j, e := th.App.Srv().Store().Job().Get(ctx, job.Id) require.NoError(t, e) require.Equal(t, j.Status, model.JobStatusCanceled) @@ -1380,6 +1382,7 @@ func TestInvalidateAllResendInviteEmailJobs(t *testing.T) { func TestInvalidateAllEmailInvites(t *testing.T) { th := Setup(t) defer th.TearDown() + ctx := request.EmptyContext(th.TestLogger) t1 := model.Token{ Token: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", @@ -1408,7 +1411,7 @@ func TestInvalidateAllEmailInvites(t *testing.T) { err = th.App.Srv().Store().Token().Save(&t3) require.NoError(t, err) - appErr := th.App.InvalidateAllEmailInvites() + appErr := th.App.InvalidateAllEmailInvites(ctx) require.Nil(t, appErr) _, err = th.App.Srv().Store().Token().GetByToken(t1.Token) diff --git a/server/channels/app/teams/helper_test.go b/server/channels/app/teams/helper_test.go index 2eaebb56e5..88b6549489 100644 --- a/server/channels/app/teams/helper_test.go +++ b/server/channels/app/teams/helper_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/mattermost/mattermost/server/public/model" + "github.com/mattermost/mattermost/server/public/shared/mlog" "github.com/mattermost/mattermost/server/public/shared/request" "github.com/mattermost/mattermost/server/v8/channels/store" "github.com/mattermost/mattermost/server/v8/config" @@ -80,7 +81,7 @@ func setupTestHelper(s store.Store, 
includeCacheLayer bool, tb testing.TB) *Test
 			},
 			wh: &mockWebHub{},
 		},
-		Context:     request.EmptyContext(nil),
+		Context:     request.EmptyContext(mlog.CreateConsoleTestLogger(tb)),
 		configStore: configStore,
 		dbStore:     s,
 		LogBuffer:   buffer,
diff --git a/server/channels/app/users/helper_test.go b/server/channels/app/users/helper_test.go
index 0cb615b28d..0a6eff3830 100644
--- a/server/channels/app/users/helper_test.go
+++ b/server/channels/app/users/helper_test.go
@@ -11,6 +11,7 @@ import (
 	"testing"
 
 	"github.com/mattermost/mattermost/server/public/model"
+	"github.com/mattermost/mattermost/server/public/shared/mlog"
 	"github.com/mattermost/mattermost/server/public/shared/request"
 	"github.com/mattermost/mattermost/server/v8/channels/store"
 	"github.com/mattermost/mattermost/server/v8/config"
@@ -78,7 +79,7 @@ func setupTestHelper(s store.Store, includeCacheLayer bool, tb testing.TB) *Test
 			oAuthStore: s.OAuth(),
 			config:     configStore.Get,
 		},
-		Context:     request.EmptyContext(nil),
+		Context:     request.EmptyContext(mlog.CreateConsoleTestLogger(tb)),
 		configStore: configStore,
 		dbStore:     s,
 		LogBuffer:   buffer,
diff --git a/server/channels/jobs/active_users/scheduler.go b/server/channels/jobs/active_users/scheduler.go
index 42dc167572..3543923dee 100644
--- a/server/channels/jobs/active_users/scheduler.go
+++ b/server/channels/jobs/active_users/scheduler.go
@@ -12,7 +12,7 @@ import (
 
 const schedFreq = 10 * time.Minute
 
-func MakeScheduler(jobServer *jobs.JobServer) model.Scheduler {
+func MakeScheduler(jobServer *jobs.JobServer) *jobs.PeriodicScheduler {
 	isEnabled := func(cfg *model.Config) bool {
 		return *cfg.MetricsSettings.Enable
 	}
diff --git a/server/channels/jobs/active_users/worker.go b/server/channels/jobs/active_users/worker.go
index 08e235fdcd..a7239eaa2b 100644
--- a/server/channels/jobs/active_users/worker.go
+++ b/server/channels/jobs/active_users/worker.go
@@ -10,11 +10,9 @@ import (
 	"github.com/mattermost/mattermost/server/v8/einterfaces"
 )
 
-const (
-	JobName = "ActiveUsers"
-)
+func MakeWorker(jobServer *jobs.JobServer, store store.Store, getMetrics func() einterfaces.MetricsInterface) *jobs.SimpleWorker {
+	const workerName = "ActiveUsers"
 
-func MakeWorker(jobServer *jobs.JobServer, store store.Store, getMetrics func() einterfaces.MetricsInterface) model.Worker {
 	isEnabled := func(cfg *model.Config) bool {
 		return *cfg.MetricsSettings.Enable
 	}
@@ -31,6 +29,6 @@ func MakeWorker(jobServer *jobs.JobServer, store store.Store, getMetrics func()
 		}
 		return nil
 	}
-	worker := jobs.NewSimpleWorker(JobName, jobServer, execute, isEnabled)
+	worker := jobs.NewSimpleWorker(workerName, jobServer, execute, isEnabled)
 	return worker
 }
diff --git a/server/channels/jobs/base_schedulers.go b/server/channels/jobs/base_schedulers.go
index 881b840a2d..c1cb5362b7 100644
--- a/server/channels/jobs/base_schedulers.go
+++ b/server/channels/jobs/base_schedulers.go
@@ -9,6 +9,7 @@ import (
 	"time"
 
 	"github.com/mattermost/mattermost/server/public/model"
+	"github.com/mattermost/mattermost/server/public/shared/request"
 )
 
 type PeriodicScheduler struct {
@@ -18,6 +19,8 @@ type PeriodicScheduler struct {
 	enabledFunc func(cfg *model.Config) bool
 }
 
+var _ Scheduler = (*PeriodicScheduler)(nil)
+
 func NewPeriodicScheduler(jobs *JobServer, jobType string, period time.Duration, enabledFunc func(cfg *model.Config) bool) *PeriodicScheduler {
 	return &PeriodicScheduler{
 		period:      period,
@@ -36,8 +39,8 @@ func (scheduler *PeriodicScheduler) NextScheduleTime(_ *model.Config, _ time.Tim
 	return &nextTime
 }
 
-func (scheduler *PeriodicScheduler) ScheduleJob(_ *model.Config /* pendingJobs */, _ bool /* lastSuccessfulJob */, _ *model.Job) (*model.Job, *model.AppError) {
-	return scheduler.jobs.CreateJob(scheduler.jobType, nil)
+func (scheduler *PeriodicScheduler) ScheduleJob(c *request.Context, _ *model.Config /* pendingJobs */, _ bool /* lastSuccessfulJob */, _ *model.Job) (*model.Job, *model.AppError) {
+	return scheduler.jobs.CreateJob(c, scheduler.jobType, nil)
 }
 
 type DailyScheduler struct {
@@ -47,6 +50,8 @@ type DailyScheduler struct {
 	enabledFunc   func(cfg *model.Config) bool
 }
 
+var _ Scheduler = (*DailyScheduler)(nil)
+
 func NewDailyScheduler(jobs *JobServer, jobType string, startTimeFunc func(cfg *model.Config) *time.Time, enabledFunc func(cfg *model.Config) bool) *DailyScheduler {
 	return &DailyScheduler{
 		startTimeFunc: startTimeFunc,
@@ -69,8 +74,8 @@ func (scheduler *DailyScheduler) NextScheduleTime(cfg *model.Config, now time.Ti
 	return GenerateNextStartDateTime(now, *scheduledTime)
 }
 
-func (scheduler *DailyScheduler) ScheduleJob(_ *model.Config /* pendingJobs */, _ bool /* lastSuccessfulJob */, _ *model.Job) (*model.Job, *model.AppError) {
-	return scheduler.jobs.CreateJob(scheduler.jobType, nil)
+func (scheduler *DailyScheduler) ScheduleJob(c *request.Context, _ *model.Config /* pendingJobs */, _ bool /* lastSuccessfulJob */, _ *model.Job) (*model.Job, *model.AppError) {
+	return scheduler.jobs.CreateJob(c, scheduler.jobType, nil)
 }
 
 const jitterRange = 2000 // milliseconds
diff --git a/server/channels/jobs/base_workers.go b/server/channels/jobs/base_workers.go
index c427a81ab2..dc4096df00 100644
--- a/server/channels/jobs/base_workers.go
+++ b/server/channels/jobs/base_workers.go
@@ -8,6 +8,7 @@ import (
 
 	"github.com/mattermost/mattermost/server/public/model"
 	"github.com/mattermost/mattermost/server/public/shared/mlog"
+	"github.com/mattermost/mattermost/server/public/shared/request"
 )
 
 type SimpleWorker struct {
@@ -16,6 +17,7 @@ type SimpleWorker struct {
 	stopped   chan bool
 	jobs      chan model.Job
 	jobServer *JobServer
+	logger    mlog.LoggerIFace
 	execute   func(job *model.Job) error
 	isEnabled func(cfg *model.Config) bool
 }
@@ -27,6 +29,7 @@ func NewSimpleWorker(name string, jobServer *JobServer, execute func(job *model.
 		stopped:   make(chan bool, 1),
 		jobs:      make(chan model.Job),
 		jobServer: jobServer,
+		logger:    jobServer.Logger().With(mlog.String("workername", name)),
 		execute:   execute,
 		isEnabled: isEnabled,
 	}
@@ -34,27 +37,29 @@ func NewSimpleWorker(name string, jobServer *JobServer, execute func(job *model.
 }
 
 func (worker *SimpleWorker) Run() {
-	mlog.Debug("Worker started", mlog.String("worker", worker.name))
+	worker.logger.Debug("Worker started")
 	defer func() {
-		mlog.Debug("Worker finished", mlog.String("worker", worker.name))
+		worker.logger.Debug("Worker finished")
 		worker.stopped <- true
 	}()
 
 	for {
 		select {
 		case <-worker.stop:
-			mlog.Debug("Worker received stop signal", mlog.String("worker", worker.name))
+			worker.logger.Debug("Worker received stop signal")
 			return
 		case job := <-worker.jobs:
-			mlog.Debug("Worker received a new candidate job.", mlog.String("worker", worker.name))
+			job.Logger = job.Logger.With(mlog.String("workername", worker.name))
+
+			job.Logger.Debug("Worker received a new candidate job")
 			worker.DoJob(&job)
 		}
 	}
 }
 
 func (worker *SimpleWorker) Stop() {
-	mlog.Debug("Worker stopping", mlog.String("worker", worker.name))
+	worker.logger.Debug("Worker stopping")
 	worker.stop <- true
 	<-worker.stopped
 }
@@ -69,48 +74,47 @@ func (worker *SimpleWorker) IsEnabled(cfg *model.Config) bool {
 
 func (worker *SimpleWorker) DoJob(job *model.Job) {
 	if claimed, err := worker.jobServer.ClaimJob(job); err != nil {
-		mlog.Warn("SimpleWorker experienced an error while trying to claim job",
-			mlog.String("worker", worker.name),
-			mlog.String("job_id", job.Id),
-			mlog.Err(err))
+		job.Logger.Warn("SimpleWorker experienced an error while trying to claim job", mlog.Err(err))
 		return
 	} else if !claimed {
 		return
 	}
 
+	c := request.EmptyContext(worker.logger)
+
 	var appErr *model.AppError
 	// We get the job again because ClaimJob changes the job status.
-	job, appErr = worker.jobServer.GetJob(job.Id)
+	job, appErr = worker.jobServer.GetJob(c, job.Id)
 	if appErr != nil {
-		mlog.Error("SimpleWorker: job execution error", mlog.String("worker", worker.name), mlog.String("job_id", job.Id), mlog.Err(appErr))
+		job.Logger.Error("SimpleWorker: job execution error", mlog.Err(appErr))
 		worker.setJobError(job, appErr)
 	}
 
 	err := worker.execute(job)
 	if err != nil {
-		mlog.Error("SimpleWorker: job execution error", mlog.String("worker", worker.name), mlog.String("job_id", job.Id), mlog.Err(err))
+		job.Logger.Error("SimpleWorker: job execution error", mlog.Err(err))
 		worker.setJobError(job, model.NewAppError("DoJob", "app.job.error", nil, "", http.StatusInternalServerError).Wrap(err))
 		return
 	}
 
-	mlog.Info("SimpleWorker: Job is complete", mlog.String("worker", worker.name), mlog.String("job_id", job.Id))
+	job.Logger.Info("SimpleWorker: Job is complete")
 	worker.setJobSuccess(job)
 }
 
 func (worker *SimpleWorker) setJobSuccess(job *model.Job) {
 	if err := worker.jobServer.SetJobProgress(job, 100); err != nil {
-		mlog.Error("Worker: Failed to update progress for job", mlog.String("worker", worker.name), mlog.String("job_id", job.Id), mlog.Err(err))
+		job.Logger.Error("Worker: Failed to update progress for job", mlog.Err(err))
 		worker.setJobError(job, err)
 	}
 
 	if err := worker.jobServer.SetJobSuccess(job); err != nil {
-		mlog.Error("SimpleWorker: Failed to set success for job", mlog.String("worker", worker.name), mlog.String("job_id", job.Id), mlog.Err(err))
+		job.Logger.Error("SimpleWorker: Failed to set success for job", mlog.Err(err))
 		worker.setJobError(job, err)
 	}
 }
 
 func (worker *SimpleWorker) setJobError(job *model.Job, appError *model.AppError) {
 	if err := worker.jobServer.SetJobError(job, appError); err != nil {
-		mlog.Error("SimpleWorker: Failed to set job error", mlog.String("worker", worker.name), mlog.String("job_id", job.Id), mlog.Err(err))
+		job.Logger.Error("SimpleWorker: Failed to set job error", mlog.Err(err))
 	}
 }
diff
--git a/server/channels/jobs/cleanup_desktop_tokens/scheduler.go b/server/channels/jobs/cleanup_desktop_tokens/scheduler.go index 879f11b575..9770633cf1 100644 --- a/server/channels/jobs/cleanup_desktop_tokens/scheduler.go +++ b/server/channels/jobs/cleanup_desktop_tokens/scheduler.go @@ -12,7 +12,7 @@ import ( const schedFreq = 1 * time.Hour -func MakeScheduler(jobServer *jobs.JobServer) model.Scheduler { +func MakeScheduler(jobServer *jobs.JobServer) *jobs.PeriodicScheduler { isEnabled := func(cfg *model.Config) bool { return true } diff --git a/server/channels/jobs/cleanup_desktop_tokens/worker.go b/server/channels/jobs/cleanup_desktop_tokens/worker.go index 43f5b43819..69c7b42c0d 100644 --- a/server/channels/jobs/cleanup_desktop_tokens/worker.go +++ b/server/channels/jobs/cleanup_desktop_tokens/worker.go @@ -22,7 +22,7 @@ type AppIface interface { RemoveFile(path string) *model.AppError } -func MakeWorker(jobServer *jobs.JobServer, store store.Store) model.Worker { +func MakeWorker(jobServer *jobs.JobServer, store store.Store) *jobs.SimpleWorker { isEnabled := func(cfg *model.Config) bool { return true } diff --git a/server/channels/jobs/expirynotify/scheduler.go b/server/channels/jobs/expirynotify/scheduler.go index d7e1c2c590..9fbc4db027 100644 --- a/server/channels/jobs/expirynotify/scheduler.go +++ b/server/channels/jobs/expirynotify/scheduler.go @@ -12,7 +12,7 @@ import ( const schedFreq = 10 * time.Minute -func MakeScheduler(jobServer *jobs.JobServer) model.Scheduler { +func MakeScheduler(jobServer *jobs.JobServer) *jobs.PeriodicScheduler { isEnabled := func(cfg *model.Config) bool { return *cfg.ServiceSettings.ExtendSessionLengthWithActivity } diff --git a/server/channels/jobs/expirynotify/worker.go b/server/channels/jobs/expirynotify/worker.go index c6b3267986..dc08f81594 100644 --- a/server/channels/jobs/expirynotify/worker.go +++ b/server/channels/jobs/expirynotify/worker.go @@ -8,11 +8,9 @@ import ( "github.com/mattermost/mattermost/server/v8/channels/jobs" ) -const ( - JobName = "ExpiryNotify" -) +func MakeWorker(jobServer *jobs.JobServer, notifySessionsExpired func() error) *jobs.SimpleWorker { + const workerName = "ExpiryNotify" -func MakeWorker(jobServer *jobs.JobServer, notifySessionsExpired func() error) model.Worker { isEnabled := func(cfg *model.Config) bool { return *cfg.ServiceSettings.ExtendSessionLengthWithActivity } @@ -21,5 +19,5 @@ func MakeWorker(jobServer *jobs.JobServer, notifySessionsExpired func() error) m return notifySessionsExpired() } - return jobs.NewSimpleWorker(JobName, jobServer, execute, isEnabled) + return jobs.NewSimpleWorker(workerName, jobServer, execute, isEnabled) } diff --git a/server/channels/jobs/export_delete/scheduler.go b/server/channels/jobs/export_delete/scheduler.go index 87469e90d7..e7c1232872 100644 --- a/server/channels/jobs/export_delete/scheduler.go +++ b/server/channels/jobs/export_delete/scheduler.go @@ -12,7 +12,7 @@ import ( const schedFreq = 24 * time.Hour -func MakeScheduler(jobServer *jobs.JobServer) model.Scheduler { +func MakeScheduler(jobServer *jobs.JobServer) *jobs.PeriodicScheduler { isEnabled := func(cfg *model.Config) bool { return *cfg.ExportSettings.Directory != "" && *cfg.ExportSettings.RetentionDays > 0 } diff --git a/server/channels/jobs/export_delete/worker.go b/server/channels/jobs/export_delete/worker.go index 0974a4fba2..b1af938849 100644 --- a/server/channels/jobs/export_delete/worker.go +++ b/server/channels/jobs/export_delete/worker.go @@ -15,8 +15,6 @@ import ( 
"github.com/mattermost/mattermost/server/v8/platform/services/configservice" ) -const jobName = "ExportDelete" - type AppIface interface { configservice.ConfigService ListExportDirectory(path string) ([]string, *model.AppError) @@ -24,7 +22,9 @@ type AppIface interface { RemoveExportFile(path string) *model.AppError } -func MakeWorker(jobServer *jobs.JobServer, app AppIface) model.Worker { +func MakeWorker(jobServer *jobs.JobServer, app AppIface) *jobs.SimpleWorker { + const workerName = "ExportDelete" + isEnabled := func(cfg *model.Config) bool { return *cfg.ExportSettings.Directory != "" && *cfg.ExportSettings.RetentionDays > 0 } @@ -43,7 +43,7 @@ func MakeWorker(jobServer *jobs.JobServer, app AppIface) model.Worker { filename := filepath.Base(exports[i]) modTime, appErr := app.ExportFileModTime(filepath.Join(exportPath, filename)) if appErr != nil { - mlog.Debug("Worker: Failed to get file modification time", + job.Logger.Debug("Worker: Failed to get file modification time", mlog.Err(appErr), mlog.String("export", exports[i])) errors.Append(appErr) continue @@ -52,7 +52,7 @@ func MakeWorker(jobServer *jobs.JobServer, app AppIface) model.Worker { if time.Now().After(modTime.Add(retentionTime)) { // remove file data from storage. if appErr := app.RemoveExportFile(exports[i]); appErr != nil { - mlog.Debug("Worker: Failed to remove file", + job.Logger.Debug("Worker: Failed to remove file", mlog.Err(appErr), mlog.String("export", exports[i])) errors.Append(appErr) continue @@ -61,10 +61,10 @@ func MakeWorker(jobServer *jobs.JobServer, app AppIface) model.Worker { } if err := errors.ErrorOrNil(); err != nil { - mlog.Warn("Worker: errors occurred", mlog.String("job-name", jobName), mlog.Err(err)) + job.Logger.Warn("Worker: errors occurred", mlog.Err(err)) } return nil } - worker := jobs.NewSimpleWorker(jobName, jobServer, execute, isEnabled) + worker := jobs.NewSimpleWorker(workerName, jobServer, execute, isEnabled) return worker } diff --git a/server/channels/jobs/export_process/worker.go b/server/channels/jobs/export_process/worker.go index f7fc88b910..0d0ab28ee8 100644 --- a/server/channels/jobs/export_process/worker.go +++ b/server/channels/jobs/export_process/worker.go @@ -15,8 +15,6 @@ import ( "github.com/mattermost/mattermost/server/v8/platform/services/configservice" ) -const jobName = "ExportProcess" - type AppIface interface { configservice.ConfigService WriteExportFileContext(ctx context.Context, fr io.Reader, path string) (int64, *model.AppError) @@ -24,7 +22,9 @@ type AppIface interface { Log() *mlog.Logger } -func MakeWorker(jobServer *jobs.JobServer, app AppIface) model.Worker { +func MakeWorker(jobServer *jobs.JobServer, app AppIface) *jobs.SimpleWorker { + const workerName = "ExportProcess" + isEnabled := func(cfg *model.Config) bool { return true } execute := func(job *model.Job) error { defer jobServer.HandleJobPanic(job) @@ -59,8 +59,7 @@ func MakeWorker(jobServer *jobs.JobServer, app AppIface) model.Worker { } }() - logger := app.Log().With(mlog.String("job_id", job.Id)) - appErr := app.BulkExport(request.EmptyContext(logger), wr, outPath, job, opts) + appErr := app.BulkExport(request.EmptyContext(job.Logger), wr, outPath, job, opts) wr.Close() // Close never returns an error if appErr != nil { @@ -69,6 +68,6 @@ func MakeWorker(jobServer *jobs.JobServer, app AppIface) model.Worker { return nil } - worker := jobs.NewSimpleWorker(jobName, jobServer, execute, isEnabled) + worker := jobs.NewSimpleWorker(workerName, jobServer, execute, isEnabled) return worker } diff --git 
a/server/channels/jobs/extract_content/worker.go b/server/channels/jobs/extract_content/worker.go index 7a6d6e182d..7e18eafc54 100644 --- a/server/channels/jobs/extract_content/worker.go +++ b/server/channels/jobs/extract_content/worker.go @@ -19,13 +19,13 @@ var ignoredFiles = map[string]bool{ "mkv": true, } -const jobName = "ExtractContent" - type AppIface interface { ExtractContentFromFileInfo(fileInfo *model.FileInfo) error } -func MakeWorker(jobServer *jobs.JobServer, app AppIface, store store.Store) model.Worker { +func MakeWorker(jobServer *jobs.JobServer, app AppIface, store store.Store) *jobs.SimpleWorker { + const workerName = "ExtractContent" + isEnabled := func(cfg *model.Config) bool { return true } @@ -65,10 +65,10 @@ func MakeWorker(jobServer *jobs.JobServer, app AppIface, store store.Store) mode } for _, fileInfo := range fileInfos { if !ignoredFiles[fileInfo.Extension] { - mlog.Debug("extracting file", mlog.String("filename", fileInfo.Name), mlog.String("filepath", fileInfo.Path)) + job.Logger.Debug("Extracting file", mlog.String("filename", fileInfo.Name), mlog.String("filepath", fileInfo.Path)) err = app.ExtractContentFromFileInfo(fileInfo) if err != nil { - mlog.Warn("Failed to extract file content", mlog.Err(err), mlog.String("file_info_id", fileInfo.Id)) + job.Logger.Warn("Failed to extract file content", mlog.Err(err), mlog.String("file_info_id", fileInfo.Id)) nErrs++ } nFiles++ @@ -85,10 +85,10 @@ func MakeWorker(jobServer *jobs.JobServer, app AppIface, store store.Store) mode job.Data["processed"] = strconv.Itoa(nFiles) if err := jobServer.UpdateInProgressJobData(job); err != nil { - mlog.Error("Worker: Failed to update job data", mlog.String("worker", model.JobTypeExtractContent), mlog.String("job_id", job.Id), mlog.Err(err)) + job.Logger.Error("Worker: Failed to update job data", mlog.Err(err)) } return nil } - worker := jobs.NewSimpleWorker(jobName, jobServer, execute, isEnabled) + worker := jobs.NewSimpleWorker(workerName, jobServer, execute, isEnabled) return worker } diff --git a/server/channels/jobs/hosted_purchase_screening/scheduler.go b/server/channels/jobs/hosted_purchase_screening/scheduler.go index 5db6e7adf8..817abc7454 100644 --- a/server/channels/jobs/hosted_purchase_screening/scheduler.go +++ b/server/channels/jobs/hosted_purchase_screening/scheduler.go @@ -12,7 +12,7 @@ import ( const schedFreq = 24 * time.Hour -func MakeScheduler(jobServer *jobs.JobServer, license *model.License) model.Scheduler { +func MakeScheduler(jobServer *jobs.JobServer, license *model.License) *jobs.PeriodicScheduler { isEnabled := func(cfg *model.Config) bool { return model.BuildEnterpriseReady == "true" && license == nil } diff --git a/server/channels/jobs/hosted_purchase_screening/worker.go b/server/channels/jobs/hosted_purchase_screening/worker.go index 0fc15d37e8..368e94ea15 100644 --- a/server/channels/jobs/hosted_purchase_screening/worker.go +++ b/server/channels/jobs/hosted_purchase_screening/worker.go @@ -12,7 +12,6 @@ import ( ) const ( - JobName = "HostedPurchaseScreening" // 3 days matches the expecation given in portal purchase flow. 
waitForScreeningDuration = 3 * 24 * time.Hour ) @@ -22,7 +21,9 @@ type ScreenTimeStore interface { PermanentDeleteByName(name string) (*model.System, error) } -func MakeWorker(jobServer *jobs.JobServer, license *model.License, screenTimeStore ScreenTimeStore) model.Worker { +func MakeWorker(jobServer *jobs.JobServer, license *model.License, screenTimeStore ScreenTimeStore) *jobs.SimpleWorker { + const workerName = "HostedPurchaseScreening" + isEnabled := func(_ *model.Config) bool { return !license.IsCloud() } @@ -44,6 +45,6 @@ func MakeWorker(jobServer *jobs.JobServer, license *model.License, screenTimeSto } return nil } - worker := jobs.NewSimpleWorker(JobName, jobServer, execute, isEnabled) + worker := jobs.NewSimpleWorker(workerName, jobServer, execute, isEnabled) return worker } diff --git a/server/channels/jobs/import_delete/scheduler.go b/server/channels/jobs/import_delete/scheduler.go index 2ef6ea9a88..ea65f34274 100644 --- a/server/channels/jobs/import_delete/scheduler.go +++ b/server/channels/jobs/import_delete/scheduler.go @@ -12,7 +12,7 @@ import ( const schedFreq = 24 * time.Hour -func MakeScheduler(jobServer *jobs.JobServer) model.Scheduler { +func MakeScheduler(jobServer *jobs.JobServer) *jobs.PeriodicScheduler { isEnabled := func(cfg *model.Config) bool { return *cfg.ImportSettings.Directory != "" && *cfg.ImportSettings.RetentionDays > 0 } diff --git a/server/channels/jobs/import_delete/worker.go b/server/channels/jobs/import_delete/worker.go index 680a85a4c9..6554ff3131 100644 --- a/server/channels/jobs/import_delete/worker.go +++ b/server/channels/jobs/import_delete/worker.go @@ -17,8 +17,6 @@ import ( "github.com/mattermost/mattermost/server/v8/platform/services/configservice" ) -const jobName = "ImportDelete" - type AppIface interface { configservice.ConfigService ListDirectory(path string) ([]string, *model.AppError) @@ -26,7 +24,9 @@ type AppIface interface { RemoveFile(path string) *model.AppError } -func MakeWorker(jobServer *jobs.JobServer, app AppIface, s store.Store) model.Worker { +func MakeWorker(jobServer *jobs.JobServer, app AppIface, s store.Store) *jobs.SimpleWorker { + const workerName = "ImportDelete" + isEnabled := func(cfg *model.Config) bool { return *cfg.ImportSettings.Directory != "" && *cfg.ImportSettings.RetentionDays > 0 } @@ -45,7 +45,7 @@ func MakeWorker(jobServer *jobs.JobServer, app AppIface, s store.Store) model.Wo filename := filepath.Base(imports[i]) modTime, appErr := app.FileModTime(filepath.Join(importPath, filename)) if appErr != nil { - mlog.Debug("Worker: Failed to get file modification time", + job.Logger.Debug("Worker: Failed to get file modification time", mlog.Err(appErr), mlog.String("import", imports[i])) multipleErrors.Append(appErr) continue @@ -60,7 +60,7 @@ func MakeWorker(jobServer *jobs.JobServer, app AppIface, s store.Store) model.Wo if len(filename) > minLen && filepath.Ext(filename) == model.IncompleteUploadSuffix { uploadID := filename[:26] if storeErr := s.UploadSession().Delete(uploadID); storeErr != nil { - mlog.Debug("Worker: Failed to delete UploadSession", + job.Logger.Debug("Worker: Failed to delete UploadSession", mlog.Err(storeErr), mlog.String("upload_id", uploadID)) multipleErrors.Append(storeErr) continue @@ -71,13 +71,13 @@ func MakeWorker(jobServer *jobs.JobServer, app AppIface, s store.Store) model.Wo info, storeErr := s.FileInfo().GetByPath(filePath) var nfErr *store.ErrNotFound if storeErr != nil && !errors.As(storeErr, &nfErr) { - mlog.Debug("Worker: Failed to get FileInfo", + 
job.Logger.Debug("Worker: Failed to get FileInfo", mlog.Err(storeErr), mlog.String("path", filePath)) multipleErrors.Append(storeErr) continue } else if storeErr == nil { if storeErr = s.FileInfo().PermanentDelete(info.Id); storeErr != nil { - mlog.Debug("Worker: Failed to delete FileInfo", + job.Logger.Debug("Worker: Failed to delete FileInfo", mlog.Err(storeErr), mlog.String("file_id", info.Id)) multipleErrors.Append(storeErr) continue @@ -87,7 +87,7 @@ func MakeWorker(jobServer *jobs.JobServer, app AppIface, s store.Store) model.Wo // remove file data from storage. if appErr := app.RemoveFile(imports[i]); appErr != nil { - mlog.Debug("Worker: Failed to remove file", + job.Logger.Debug("Worker: Failed to remove file", mlog.Err(appErr), mlog.String("import", imports[i])) multipleErrors.Append(appErr) continue @@ -96,10 +96,10 @@ func MakeWorker(jobServer *jobs.JobServer, app AppIface, s store.Store) model.Wo } if err := multipleErrors.ErrorOrNil(); err != nil { - mlog.Warn("Worker: errors occurred", mlog.String("job-name", jobName), mlog.Err(err)) + job.Logger.Warn("Worker: errors occurred", mlog.Err(err)) } return nil } - worker := jobs.NewSimpleWorker(jobName, jobServer, execute, isEnabled) + worker := jobs.NewSimpleWorker(workerName, jobServer, execute, isEnabled) return worker } diff --git a/server/channels/jobs/import_process/worker.go b/server/channels/jobs/import_process/worker.go index cf3ee8d56b..a234751862 100644 --- a/server/channels/jobs/import_process/worker.go +++ b/server/channels/jobs/import_process/worker.go @@ -20,8 +20,6 @@ import ( "github.com/mattermost/mattermost/server/v8/platform/shared/filestore" ) -const jobName = "ImportProcess" - type AppIface interface { configservice.ConfigService RemoveFile(path string) *model.AppError @@ -32,8 +30,10 @@ type AppIface interface { Log() *mlog.Logger } -func MakeWorker(jobServer *jobs.JobServer, app AppIface) model.Worker { - appContext := request.EmptyContext(app.Log()) +func MakeWorker(jobServer *jobs.JobServer, app AppIface) *jobs.SimpleWorker { + const workerName = "ImportProcess" + + appContext := request.EmptyContext(jobServer.Logger()) isEnabled := func(cfg *model.Config) bool { return true } @@ -113,6 +113,6 @@ func MakeWorker(jobServer *jobs.JobServer, app AppIface) model.Worker { } return nil } - worker := jobs.NewSimpleWorker(jobName, jobServer, execute, isEnabled) + worker := jobs.NewSimpleWorker(workerName, jobServer, execute, isEnabled) return worker } diff --git a/server/channels/jobs/jobs.go b/server/channels/jobs/jobs.go index f9f6c89600..3b67eee663 100644 --- a/server/channels/jobs/jobs.go +++ b/server/channels/jobs/jobs.go @@ -4,7 +4,6 @@ package jobs import ( - "context" "errors" "fmt" "net/http" @@ -14,6 +13,7 @@ import ( "github.com/mattermost/mattermost/server/public/model" "github.com/mattermost/mattermost/server/public/shared/mlog" + "github.com/mattermost/mattermost/server/public/shared/request" "github.com/mattermost/mattermost/server/v8/channels/store" ) @@ -21,8 +21,8 @@ const ( CancelWatcherPollingInterval = 5000 ) -func (srv *JobServer) CreateJob(jobType string, jobData map[string]string) (*model.Job, *model.AppError) { - job, appErr := srv._createJob(jobType, jobData) +func (srv *JobServer) CreateJob(c *request.Context, jobType string, jobData map[string]string) (*model.Job, *model.AppError) { + job, appErr := srv._createJob(c, jobType, jobData) if appErr != nil { return nil, appErr } @@ -34,8 +34,8 @@ func (srv *JobServer) CreateJob(jobType string, jobData map[string]string) (*mod return job, 
nil
 }
 
-func (srv *JobServer) CreateJobOnce(jobType string, jobData map[string]string) (*model.Job, *model.AppError) {
-	job, appErr := srv._createJob(jobType, jobData)
+func (srv *JobServer) CreateJobOnce(c *request.Context, jobType string, jobData map[string]string) (*model.Job, *model.AppError) {
+	job, appErr := srv._createJob(c, jobType, jobData)
 	if appErr != nil {
 		return nil, appErr
 	}
@@ -47,7 +47,7 @@ func (srv *JobServer) CreateJobOnce(jobType string, jobData map[string]string) (
 	return job, nil
 }
 
-func (srv *JobServer) _createJob(jobType string, jobData map[string]string) (*model.Job, *model.AppError) {
+func (srv *JobServer) _createJob(c *request.Context, jobType string, jobData map[string]string) (*model.Job, *model.AppError) {
 	job := model.Job{
 		Id:       model.NewId(),
 		Type:     jobType,
@@ -56,6 +56,8 @@ func (srv *JobServer) _createJob(jobType string, jobData map[string]string) (*mo
 		Data:     jobData,
 	}
 
+	job.InitLogger(c.Logger())
+
 	if err := job.IsValid(); err != nil {
 		return nil, err
 	}
@@ -67,8 +69,8 @@ func (srv *JobServer) _createJob(jobType string, jobData map[string]string) (*mo
 	return &job, nil
 }
 
-func (srv *JobServer) GetJob(id string) (*model.Job, *model.AppError) {
-	job, err := srv.Store.Job().Get(id)
+func (srv *JobServer) GetJob(c *request.Context, id string) (*model.Job, *model.AppError) {
+	job, err := srv.Store.Job().Get(c, id)
 	if err != nil {
 		var nfErr *store.ErrNotFound
 		switch {
@@ -212,9 +214,15 @@ func (srv *JobServer) HandleJobPanic(job *model.Job) {
 		return
 	}
 
+	var logger mlog.LoggerIFace = job.Logger
+	if job.Logger == nil {
+		// Fall back to JobServer logger
+		logger = srv.logger
+	}
+
 	sb := &strings.Builder{}
 	pprof.Lookup("goroutine").WriteTo(sb, 2)
-	mlog.Error("Unhandled panic in job", mlog.Any("panic", r), mlog.Any("job", job), mlog.String("stack", sb.String()))
+	logger.Error("Unhandled panic in job", mlog.Any("panic", r), mlog.Any("job", job), mlog.String("stack", sb.String()))
 
 	rerr, ok := r.(error)
 	if !ok {
@@ -223,20 +231,20 @@ func (srv *JobServer) HandleJobPanic(job *model.Job) {
 	appErr := srv.SetJobError(job, model.NewAppError("HandleJobPanic", "app.job.update.app_error", nil, "", http.StatusInternalServerError).Wrap(rerr))
 	if appErr != nil {
-		mlog.Error("Failed to set the job status to 'failed'", mlog.Err(appErr), mlog.Any("job", job))
+		logger.Error("Failed to set the job status to 'failed'", mlog.Err(appErr), mlog.Any("job", job))
 	}
 
 	panic(r)
 }
 
-func (srv *JobServer) RequestCancellation(jobId string) *model.AppError {
+func (srv *JobServer) RequestCancellation(c *request.Context, jobId string) *model.AppError {
 	updated, err := srv.Store.Job().UpdateStatusOptimistically(jobId, model.JobStatusPending, model.JobStatusCanceled)
 	if err != nil {
 		return model.NewAppError("RequestCancellation", "app.job.update.app_error", nil, "", http.StatusInternalServerError).Wrap(err)
 	}
 	if updated {
 		if srv.metrics != nil {
-			job, err := srv.GetJob(jobId)
+			job, err := srv.GetJob(c, jobId)
 			if err != nil {
 				return model.NewAppError("RequestCancellation", "app.job.update.app_error", nil, "", http.StatusInternalServerError).Wrap(err)
 			}
@@ -259,17 +267,17 @@ func (srv *JobServer) RequestCancellation(jobId string) *model.AppError {
 	return model.NewAppError("RequestCancellation", "jobs.request_cancellation.status.error", nil, "id="+jobId, http.StatusInternalServerError)
 }
 
-func (srv *JobServer) CancellationWatcher(ctx context.Context, jobId string, cancelChan chan struct{}) {
+func (srv *JobServer) CancellationWatcher(c *request.Context, jobId string, cancelChan chan struct{}) {
for { select { - case <-ctx.Done(): - mlog.Debug("CancellationWatcher for Job Aborting as job has finished.", mlog.String("job_id", jobId)) + case <-c.Context().Done(): + c.Logger().Debug("CancellationWatcher for Job Aborting as job has finished.", mlog.String("job_id", jobId)) return case <-time.After(CancelWatcherPollingInterval * time.Millisecond): - mlog.Debug("CancellationWatcher for Job started polling.", mlog.String("job_id", jobId)) - jobStatus, err := srv.Store.Job().Get(jobId) + c.Logger().Debug("CancellationWatcher for Job started polling.", mlog.String("job_id", jobId)) + jobStatus, err := srv.Store.Job().Get(c, jobId) if err != nil { - mlog.Warn("Error getting job", mlog.String("job_id", jobId), mlog.Err(err)) + c.Logger().Warn("Error getting job", mlog.String("job_id", jobId), mlog.Err(err)) continue } if jobStatus.Status == model.JobStatusCancelRequested { @@ -298,8 +306,8 @@ func (srv *JobServer) CheckForPendingJobsByType(jobType string) (bool, *model.Ap return count > 0, nil } -func (srv *JobServer) GetJobsByTypeAndStatus(jobType string, status string) ([]*model.Job, *model.AppError) { - jobs, err := srv.Store.Job().GetAllByTypeAndStatus(jobType, status) +func (srv *JobServer) GetJobsByTypeAndStatus(c *request.Context, jobType string, status string) ([]*model.Job, *model.AppError) { + jobs, err := srv.Store.Job().GetAllByTypeAndStatus(c, jobType, status) if err != nil { return nil, model.NewAppError("GetJobsByTypeAndStatus", "app.job.get_all_jobs_by_type_and_status.app_error", nil, "", http.StatusInternalServerError).Wrap(err) } diff --git a/server/channels/jobs/jobs_test.go b/server/channels/jobs/jobs_test.go index a27208fb2f..964c0a31ac 100644 --- a/server/channels/jobs/jobs_test.go +++ b/server/channels/jobs/jobs_test.go @@ -9,9 +9,12 @@ import ( "net/http" "testing" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/mattermost/mattermost/server/public/model" + "github.com/mattermost/mattermost/server/public/shared/mlog" + "github.com/mattermost/mattermost/server/public/shared/request" "github.com/mattermost/mattermost/server/v8/channels/store" "github.com/mattermost/mattermost/server/v8/channels/store/storetest" "github.com/mattermost/mattermost/server/v8/channels/utils/testutils" @@ -54,7 +57,7 @@ func makeTeamEditionJobServer(t *testing.T) (*JobServer, *storetest.Store) { mockStore.AssertExpectations(t) }) - jobServer := NewJobServer(configService, mockStore, nil) + jobServer := NewJobServer(configService, mockStore, nil, mlog.CreateConsoleTestLogger(t)) return jobServer, mockStore } @@ -498,11 +501,13 @@ func TestUpdateInProgressJobData(t *testing.T) { func TestHandleJobPanic(t *testing.T) { t.Run("no panic", func(t *testing.T) { jobServer, _, _ := makeJobServer(t) + logger := mlog.CreateConsoleTestLogger(t) job := &model.Job{ Type: model.JobTypeImportProcess, Status: model.JobStatusInProgress, } + job.InitLogger(logger) f := func() { defer jobServer.HandleJobPanic(job) @@ -515,11 +520,13 @@ func TestHandleJobPanic(t *testing.T) { t.Run("with panic string", func(t *testing.T) { jobServer, mockStore, metrics := makeJobServer(t) + logger := mlog.CreateConsoleTestLogger(t) job := &model.Job{ Type: model.JobTypeImportProcess, Status: model.JobStatusInProgress, } + job.InitLogger(logger) f := func() { defer jobServer.HandleJobPanic(job) @@ -535,11 +542,13 @@ func TestHandleJobPanic(t *testing.T) { t.Run("with panic error", func(t *testing.T) { jobServer, mockStore, metrics := makeJobServer(t) + logger := mlog.CreateConsoleTestLogger(t) 
job := &model.Job{ Type: model.JobTypeImportProcess, Status: model.JobStatusInProgress, } + job.InitLogger(logger) f := func() { defer jobServer.HandleJobPanic(job) @@ -555,12 +564,13 @@ func TestHandleJobPanic(t *testing.T) { } func TestRequestCancellation(t *testing.T) { + ctx := request.EmptyContext(mlog.CreateConsoleTestLogger(t)) t.Run("error cancelling", func(t *testing.T) { jobServer, mockStore, _ := makeJobServer(t) mockStore.JobStore.On("UpdateStatusOptimistically", "job_id", model.JobStatusPending, model.JobStatusCanceled).Return(false, &model.AppError{Message: "message"}) - err := jobServer.RequestCancellation("job_id") + err := jobServer.RequestCancellation(ctx, "job_id") expectErrorId(t, "app.job.update.app_error", err) }) @@ -568,9 +578,9 @@ func TestRequestCancellation(t *testing.T) { jobServer, mockStore, _ := makeJobServer(t) mockStore.JobStore.On("UpdateStatusOptimistically", "job_id", model.JobStatusPending, model.JobStatusCanceled).Return(true, nil) - mockStore.JobStore.On("Get", "job_id").Return(nil, &store.ErrNotFound{}) + mockStore.JobStore.On("Get", mock.AnythingOfType("*request.Context"), "job_id").Return(nil, &store.ErrNotFound{}) - err := jobServer.RequestCancellation("job_id") + err := jobServer.RequestCancellation(ctx, "job_id") expectErrorId(t, "app.job.update.app_error", err) }) @@ -583,10 +593,10 @@ func TestRequestCancellation(t *testing.T) { } mockStore.JobStore.On("UpdateStatusOptimistically", "job_id", model.JobStatusPending, model.JobStatusCanceled).Return(true, nil) - mockStore.JobStore.On("Get", "job_id").Return(job, nil) + mockStore.JobStore.On("Get", mock.AnythingOfType("*request.Context"), "job_id").Return(job, nil) mockMetrics.On("DecrementJobActive", "job_type") - err := jobServer.RequestCancellation("job_id") + err := jobServer.RequestCancellation(ctx, "job_id") require.Nil(t, err) }) @@ -595,7 +605,7 @@ func TestRequestCancellation(t *testing.T) { mockStore.JobStore.On("UpdateStatusOptimistically", "job_id", model.JobStatusPending, model.JobStatusCanceled).Return(true, nil) - err := jobServer.RequestCancellation("job_id") + err := jobServer.RequestCancellation(ctx, "job_id") require.Nil(t, err) }) @@ -605,7 +615,7 @@ func TestRequestCancellation(t *testing.T) { mockStore.JobStore.On("UpdateStatusOptimistically", "job_id", model.JobStatusPending, model.JobStatusCanceled).Return(false, nil) mockStore.JobStore.On("UpdateStatusOptimistically", "job_id", model.JobStatusInProgress, model.JobStatusCancelRequested).Return(false, &model.AppError{Message: "message"}) - err := jobServer.RequestCancellation("job_id") + err := jobServer.RequestCancellation(ctx, "job_id") expectErrorId(t, "app.job.update.app_error", err) }) @@ -615,7 +625,7 @@ func TestRequestCancellation(t *testing.T) { mockStore.JobStore.On("UpdateStatusOptimistically", "job_id", model.JobStatusPending, model.JobStatusCanceled).Return(false, nil) mockStore.JobStore.On("UpdateStatusOptimistically", "job_id", model.JobStatusInProgress, model.JobStatusCancelRequested).Return(true, nil) - err := jobServer.RequestCancellation("job_id") + err := jobServer.RequestCancellation(ctx, "job_id") require.Nil(t, err) }) @@ -625,7 +635,7 @@ func TestRequestCancellation(t *testing.T) { mockStore.JobStore.On("UpdateStatusOptimistically", "job_id", model.JobStatusPending, model.JobStatusCanceled).Return(false, nil) mockStore.JobStore.On("UpdateStatusOptimistically", "job_id", model.JobStatusInProgress, model.JobStatusCancelRequested).Return(false, nil) - err := jobServer.RequestCancellation("job_id") + err 
:= jobServer.RequestCancellation(ctx, "job_id") expectErrorId(t, "jobs.request_cancellation.status.error", err) }) } diff --git a/server/channels/jobs/jobs_watcher.go b/server/channels/jobs/jobs_watcher.go index aea282ff83..58b4dcf432 100644 --- a/server/channels/jobs/jobs_watcher.go +++ b/server/channels/jobs/jobs_watcher.go @@ -9,6 +9,7 @@ import ( "github.com/mattermost/mattermost/server/public/model" "github.com/mattermost/mattermost/server/public/shared/mlog" + "github.com/mattermost/mattermost/server/public/shared/request" ) // Default polling interval for jobs termination. @@ -67,7 +68,7 @@ func (watcher *Watcher) Stop() { } func (watcher *Watcher) PollAndNotify() { - jobs, err := watcher.srv.Store.Job().GetAllByStatus(model.JobStatusPending) + jobs, err := watcher.srv.Store.Job().GetAllByStatus(request.EmptyContext(watcher.srv.logger), model.JobStatusPending) if err != nil { mlog.Error("Error occurred getting all pending statuses.", mlog.Err(err)) return diff --git a/server/channels/jobs/last_accessible_file/scheduler.go b/server/channels/jobs/last_accessible_file/scheduler.go index 8e2000e7a0..0e072cead5 100644 --- a/server/channels/jobs/last_accessible_file/scheduler.go +++ b/server/channels/jobs/last_accessible_file/scheduler.go @@ -14,7 +14,7 @@ import ( const schedFreq = 2 * time.Hour -func MakeScheduler(jobServer *jobs.JobServer, license *model.License) model.Scheduler { +func MakeScheduler(jobServer *jobs.JobServer, license *model.License) *jobs.PeriodicScheduler { isEnabled := func(cfg *model.Config) bool { enabled := license != nil && *license.Features.Cloud mlog.Debug("Scheduler: isEnabled: "+strconv.FormatBool(enabled), mlog.String("scheduler", model.JobTypeLastAccessibleFile)) diff --git a/server/channels/jobs/last_accessible_file/worker.go b/server/channels/jobs/last_accessible_file/worker.go index 96616b952c..67f9fc22f5 100644 --- a/server/channels/jobs/last_accessible_file/worker.go +++ b/server/channels/jobs/last_accessible_file/worker.go @@ -8,15 +8,13 @@ import ( "github.com/mattermost/mattermost/server/v8/channels/jobs" ) -const ( - JobName = "LastAccessibleFile" -) - type AppIface interface { ComputeLastAccessibleFileTime() error } -func MakeWorker(jobServer *jobs.JobServer, license *model.License, app AppIface) model.Worker { +func MakeWorker(jobServer *jobs.JobServer, license *model.License, app AppIface) *jobs.SimpleWorker { + const workerName = "LastAccessibleFile" + isEnabled := func(_ *model.Config) bool { return license != nil && *license.Features.Cloud } @@ -25,6 +23,6 @@ func MakeWorker(jobServer *jobs.JobServer, license *model.License, app AppIface) return app.ComputeLastAccessibleFileTime() } - worker := jobs.NewSimpleWorker(JobName, jobServer, execute, isEnabled) + worker := jobs.NewSimpleWorker(workerName, jobServer, execute, isEnabled) return worker } diff --git a/server/channels/jobs/last_accessible_post/scheduler.go b/server/channels/jobs/last_accessible_post/scheduler.go index e819253887..710a778adb 100644 --- a/server/channels/jobs/last_accessible_post/scheduler.go +++ b/server/channels/jobs/last_accessible_post/scheduler.go @@ -14,7 +14,7 @@ import ( const schedFreq = 30 * time.Minute -func MakeScheduler(jobServer *jobs.JobServer, license *model.License) model.Scheduler { +func MakeScheduler(jobServer *jobs.JobServer, license *model.License) *jobs.PeriodicScheduler { isEnabled := func(cfg *model.Config) bool { enabled := license != nil && *license.Features.Cloud mlog.Debug("Scheduler: isEnabled: "+strconv.FormatBool(enabled), 
mlog.String("scheduler", model.JobTypeLastAccessiblePost)) diff --git a/server/channels/jobs/last_accessible_post/worker.go b/server/channels/jobs/last_accessible_post/worker.go index b4ee708bec..0f57fb1a08 100644 --- a/server/channels/jobs/last_accessible_post/worker.go +++ b/server/channels/jobs/last_accessible_post/worker.go @@ -8,15 +8,13 @@ import ( "github.com/mattermost/mattermost/server/v8/channels/jobs" ) -const ( - JobName = "LastAccessiblePost" -) - type AppIface interface { ComputeLastAccessiblePostTime() error } -func MakeWorker(jobServer *jobs.JobServer, license *model.License, app AppIface) model.Worker { +func MakeWorker(jobServer *jobs.JobServer, license *model.License, app AppIface) *jobs.SimpleWorker { + const workerName = "LastAccessiblePost" + isEnabled := func(_ *model.Config) bool { return license != nil && license.Features != nil && *license.Features.Cloud } @@ -25,6 +23,6 @@ func MakeWorker(jobServer *jobs.JobServer, license *model.License, app AppIface) return app.ComputeLastAccessiblePostTime() } - worker := jobs.NewSimpleWorker(JobName, jobServer, execute, isEnabled) + worker := jobs.NewSimpleWorker(workerName, jobServer, execute, isEnabled) return worker } diff --git a/server/channels/jobs/migrations/helper_test.go b/server/channels/jobs/migrations/helper_test.go index 1cca9ae498..f1a1fedb5d 100644 --- a/server/channels/jobs/migrations/helper_test.go +++ b/server/channels/jobs/migrations/helper_test.go @@ -7,7 +7,10 @@ import ( "testing" "github.com/mattermost/mattermost/server/public/model" + "github.com/mattermost/mattermost/server/public/shared/mlog" + "github.com/mattermost/mattermost/server/public/shared/request" "github.com/mattermost/mattermost/server/v8/channels/store" + "github.com/stretchr/testify/require" ) func Setup(tb testing.TB) store.Store { @@ -16,17 +19,15 @@ func Setup(tb testing.TB) store.Store { return store } -func deleteAllJobsByTypeAndMigrationKey(store store.Store, jobType string, migrationKey string) { - jobs, err := store.Job().GetAllByType(model.JobTypeMigrations) - if err != nil { - panic(err) - } +func deleteAllJobsByTypeAndMigrationKey(t *testing.T, store store.Store, jobType string, migrationKey string) { + ctx := request.EmptyContext(mlog.CreateConsoleTestLogger(t)) + jobs, err := store.Job().GetAllByType(ctx, model.JobTypeMigrations) + require.NoError(t, err) for _, job := range jobs { if key, ok := job.Data[JobDataKeyMigration]; ok && key == migrationKey { - if _, err = store.Job().Delete(job.Id); err != nil { - panic(err) - } + _, err = store.Job().Delete(job.Id) + require.NoError(t, err) } } } diff --git a/server/channels/jobs/migrations/migrations.go b/server/channels/jobs/migrations/migrations.go index 28124bff73..87d5d6c5ab 100644 --- a/server/channels/jobs/migrations/migrations.go +++ b/server/channels/jobs/migrations/migrations.go @@ -7,6 +7,7 @@ import ( "net/http" "github.com/mattermost/mattermost/server/public/model" + "github.com/mattermost/mattermost/server/public/shared/request" "github.com/mattermost/mattermost/server/v8/channels/store" ) @@ -25,12 +26,12 @@ func MakeMigrationsList() []string { } } -func GetMigrationState(migration string, store store.Store) (string, *model.Job, *model.AppError) { +func GetMigrationState(c *request.Context, migration string, store store.Store) (string, *model.Job, *model.AppError) { if _, err := store.System().GetByName(migration); err == nil { return MigrationStateCompleted, nil, nil } - jobs, err := store.Job().GetAllByType(model.JobTypeMigrations) + jobs, err := 
store.Job().GetAllByType(c, model.JobTypeMigrations) if err != nil { return "", nil, model.NewAppError("GetMigrationState", "app.job.get_all.app_error", nil, "", http.StatusInternalServerError).Wrap(err) } diff --git a/server/channels/jobs/migrations/migrations_test.go b/server/channels/jobs/migrations/migrations_test.go index 5b05c95b61..d30664369b 100644 --- a/server/channels/jobs/migrations/migrations_test.go +++ b/server/channels/jobs/migrations/migrations_test.go @@ -10,6 +10,8 @@ import ( "github.com/stretchr/testify/require" "github.com/mattermost/mattermost/server/public/model" + "github.com/mattermost/mattermost/server/public/shared/mlog" + "github.com/mattermost/mattermost/server/public/shared/request" ) func TestGetMigrationState(t *testing.T) { @@ -17,13 +19,14 @@ func TestGetMigrationState(t *testing.T) { t.SkipNow() } store := Setup(t) + ctx := request.EmptyContext(mlog.CreateConsoleTestLogger(t)) migrationKey := model.NewId() - deleteAllJobsByTypeAndMigrationKey(store, model.JobTypeMigrations, migrationKey) + deleteAllJobsByTypeAndMigrationKey(t, store, model.JobTypeMigrations, migrationKey) // Test with no job yet. - state, job, err := GetMigrationState(migrationKey, store) + state, job, err := GetMigrationState(ctx, migrationKey, store) assert.Nil(t, err) assert.Nil(t, job) assert.Equal(t, "unscheduled", state) @@ -36,7 +39,7 @@ func TestGetMigrationState(t *testing.T) { nErr := store.System().Save(&system) assert.NoError(t, nErr) - state, job, err = GetMigrationState(migrationKey, store) + state, job, err = GetMigrationState(ctx, migrationKey, store) assert.Nil(t, err) assert.Nil(t, job) assert.Equal(t, "completed", state) @@ -58,7 +61,7 @@ func TestGetMigrationState(t *testing.T) { j1, nErr = store.Job().Save(j1) require.NoError(t, nErr) - state, job, err = GetMigrationState(migrationKey, store) + state, job, err = GetMigrationState(ctx, migrationKey, store) assert.Nil(t, err) assert.Equal(t, j1.Id, job.Id) assert.Equal(t, "in_progress", state) @@ -77,7 +80,7 @@ func TestGetMigrationState(t *testing.T) { j2, nErr = store.Job().Save(j2) require.NoError(t, nErr) - state, job, err = GetMigrationState(migrationKey, store) + state, job, err = GetMigrationState(ctx, migrationKey, store) assert.Nil(t, err) assert.Equal(t, j2.Id, job.Id) assert.Equal(t, "in_progress", state) @@ -96,7 +99,7 @@ func TestGetMigrationState(t *testing.T) { j3, nErr = store.Job().Save(j3) require.NoError(t, nErr) - state, job, err = GetMigrationState(migrationKey, store) + state, job, err = GetMigrationState(ctx, migrationKey, store) assert.Nil(t, err) assert.Equal(t, j3.Id, job.Id) assert.Equal(t, "unscheduled", state) diff --git a/server/channels/jobs/migrations/scheduler.go b/server/channels/jobs/migrations/scheduler.go index f8fb33d1a0..a109246095 100644 --- a/server/channels/jobs/migrations/scheduler.go +++ b/server/channels/jobs/migrations/scheduler.go @@ -8,6 +8,7 @@ import ( "github.com/mattermost/mattermost/server/public/model" "github.com/mattermost/mattermost/server/public/shared/mlog" + "github.com/mattermost/mattermost/server/public/shared/request" "github.com/mattermost/mattermost/server/v8/channels/jobs" "github.com/mattermost/mattermost/server/v8/channels/store" ) @@ -22,7 +23,9 @@ type Scheduler struct { allMigrationsCompleted bool } -func MakeScheduler(jobServer *jobs.JobServer, store store.Store) model.Scheduler { +var _ jobs.Scheduler = (*Scheduler)(nil) + +func MakeScheduler(jobServer *jobs.JobServer, store store.Store) *Scheduler { return &Scheduler{jobServer, store, false} } @@ 
-41,27 +44,14 @@ func (scheduler *Scheduler) NextScheduleTime(cfg *model.Config, now time.Time, p
 }
 
 //nolint:unparam
-func (scheduler *Scheduler) ScheduleJob(cfg *model.Config, pendingJobs bool, lastSuccessfulJob *model.Job) (*model.Job, *model.AppError) {
-	mlog.Debug("Scheduling Job", mlog.String("scheduler", model.JobTypeMigrations))
+func (scheduler *Scheduler) ScheduleJob(c *request.Context, cfg *model.Config, pendingJobs bool, lastSuccessfulJob *model.Job) (*model.Job, *model.AppError) {
+	c.Logger().Debug("Scheduling Job", mlog.String("scheduler", model.JobTypeMigrations))
 
 	// Work through the list of migrations in order. Schedule the first one that isn't done (assuming it isn't in progress already).
 	for _, key := range MakeMigrationsList() {
-		state, job, err := GetMigrationState(key, scheduler.store)
+		state, job, err := GetMigrationState(c, key, scheduler.store)
 		if err != nil {
-			mlog.Error("Failed to determine status of migration: ", mlog.String("scheduler", model.JobTypeMigrations), mlog.String("migration_key", key), mlog.Err(err))
-			return nil, nil
-		}
-
-		if state == MigrationStateInProgress {
-			// Check the migration job isn't wedged.
-			if job != nil && job.LastActivityAt < model.GetMillis()-MigrationJobWedgedTimeoutMilliseconds && job.CreateAt < model.GetMillis()-MigrationJobWedgedTimeoutMilliseconds {
-				mlog.Warn("Job appears to be wedged. Rescheduling another instance.", mlog.String("scheduler", model.JobTypeMigrations), mlog.String("wedged_job_id", job.Id), mlog.String("migration_key", key))
-				if err := scheduler.jobServer.SetJobError(job, nil); err != nil {
-					mlog.Error("Worker: Failed to set job error", mlog.String("scheduler", model.JobTypeMigrations), mlog.String("job_id", job.Id), mlog.Err(err))
-				}
-				return scheduler.createJob(key, job)
-			}
-
+			c.Logger().Error("Failed to determine status of migration: ", mlog.String("scheduler", model.JobTypeMigrations), mlog.String("migration_key", key), mlog.Err(err))
 			return nil, nil
 		}
@@ -70,23 +60,36 @@ func (scheduler *Scheduler) ScheduleJob(cfg *model.Config, pendingJobs bool, las
 			continue
 		}
 
-		if state == MigrationStateUnscheduled {
-			mlog.Debug("Scheduling a new job for migration.", mlog.String("scheduler", model.JobTypeMigrations), mlog.String("migration_key", key))
-			return scheduler.createJob(key, job)
+		if state == MigrationStateInProgress {
+			// Check the migration job isn't wedged.
+			if job != nil && job.LastActivityAt < model.GetMillis()-MigrationJobWedgedTimeoutMilliseconds && job.CreateAt < model.GetMillis()-MigrationJobWedgedTimeoutMilliseconds {
+				job.Logger.Warn("Job appears to be wedged. Rescheduling another instance.", mlog.String("scheduler", model.JobTypeMigrations), mlog.String("wedged_job_id", job.Id), mlog.String("migration_key", key))
+				if err := scheduler.jobServer.SetJobError(job, nil); err != nil {
+					job.Logger.Error("Worker: Failed to set job error", mlog.String("scheduler", model.JobTypeMigrations), mlog.Err(err))
+				}
+				return scheduler.createJob(c, key, job)
+			}
+
+			return nil, nil
 		}
 
-		mlog.Error("Unknown migration state. Not doing anything.", mlog.String("migration_state", state))
+		if state == MigrationStateUnscheduled {
+			c.Logger().Debug("Scheduling a new job for migration.", mlog.String("scheduler", model.JobTypeMigrations), mlog.String("migration_key", key))
+			return scheduler.createJob(c, key, job)
+		}
+
+		c.Logger().Error("Unknown migration state. 
Not doing anything.", mlog.String("migration_state", state)) return nil, nil } // If we reached here, then there aren't any migrations left to run. scheduler.allMigrationsCompleted = true - mlog.Debug("All migrations are complete.", mlog.String("scheduler", model.JobTypeMigrations)) + c.Logger().Debug("All migrations are complete.", mlog.String("scheduler", model.JobTypeMigrations)) return nil, nil } -func (scheduler *Scheduler) createJob(migrationKey string, lastJob *model.Job) (*model.Job, *model.AppError) { +func (scheduler *Scheduler) createJob(c *request.Context, migrationKey string, lastJob *model.Job) (*model.Job, *model.AppError) { var lastDone string if lastJob != nil { lastDone = lastJob.Data[JobDataKeyMigrationLastDone] @@ -97,7 +100,7 @@ func (scheduler *Scheduler) createJob(migrationKey string, lastJob *model.Job) ( JobDataKeyMigrationLastDone: lastDone, } - job, err := scheduler.jobServer.CreateJob(model.JobTypeMigrations, data) + job, err := scheduler.jobServer.CreateJob(c, model.JobTypeMigrations, data) if err != nil { return nil, err } diff --git a/server/channels/jobs/migrations/worker.go b/server/channels/jobs/migrations/worker.go index bcbdc6e83c..fbe7a8cc0f 100644 --- a/server/channels/jobs/migrations/worker.go +++ b/server/channels/jobs/migrations/worker.go @@ -11,6 +11,7 @@ import ( "github.com/mattermost/mattermost/server/public/model" "github.com/mattermost/mattermost/server/public/shared/mlog" + "github.com/mattermost/mattermost/server/public/shared/request" "github.com/mattermost/mattermost/server/v8/channels/jobs" "github.com/mattermost/mattermost/server/v8/channels/store" ) @@ -25,17 +26,20 @@ type Worker struct { stopped chan bool jobs chan model.Job jobServer *jobs.JobServer + logger mlog.LoggerIFace store store.Store closed int32 } -func MakeWorker(jobServer *jobs.JobServer, store store.Store) model.Worker { +func MakeWorker(jobServer *jobs.JobServer, store store.Store) *Worker { + const workerName = "Migrations" worker := Worker{ - name: "Migrations", + name: workerName, stop: make(chan struct{}), stopped: make(chan bool, 1), jobs: make(chan model.Job), jobServer: jobServer, + logger: jobServer.Logger().With(mlog.String("workername", workerName)), store: store, } @@ -47,20 +51,22 @@ func (worker *Worker) Run() { if atomic.CompareAndSwapInt32(&worker.closed, 1, 0) { worker.stop = make(chan struct{}) } - mlog.Debug("Worker started", mlog.String("worker", worker.name)) + worker.logger.Debug("Worker started") defer func() { - mlog.Debug("Worker finished", mlog.String("worker", worker.name)) + worker.logger.Debug("Worker finished") worker.stopped <- true }() for { select { case <-worker.stop: - mlog.Debug("Worker received stop signal", mlog.String("worker", worker.name)) + worker.logger.Debug("Worker received stop signal") return case job := <-worker.jobs: - mlog.Debug("Worker received a new candidate job.", mlog.String("worker", worker.name)) + job.Logger = job.Logger.With(mlog.String("workername", worker.name)) + + job.Logger.Debug("Worker received a new candidate job") worker.DoJob(&job) } } @@ -71,7 +77,7 @@ func (worker *Worker) Stop() { if !atomic.CompareAndSwapInt32(&worker.closed, 0, 1) { return } - mlog.Debug("Worker stopping", mlog.String("worker", worker.name)) + worker.logger.Debug("Worker stopping") close(worker.stop) <-worker.stopped } @@ -88,47 +94,45 @@ func (worker *Worker) DoJob(job *model.Job) { defer worker.jobServer.HandleJobPanic(job) if claimed, err := worker.jobServer.ClaimJob(job); err != nil { - mlog.Info("Worker experienced an error 
while trying to claim job", - mlog.String("worker", worker.name), - mlog.String("job_id", job.Id), - mlog.String("error", err.Error())) + job.Logger.Info("Worker experienced an error while trying to claim job", mlog.Err(err)) return } else if !claimed { return } + cancelContext := request.EmptyContext(worker.logger) cancelCtx, cancelCancelWatcher := context.WithCancel(context.Background()) cancelWatcherChan := make(chan struct{}, 1) - go worker.jobServer.CancellationWatcher(cancelCtx, job.Id, cancelWatcherChan) - + cancelContext.SetContext(cancelCtx) + go worker.jobServer.CancellationWatcher(cancelContext, job.Id, cancelWatcherChan) defer cancelCancelWatcher() for { select { case <-cancelWatcherChan: - mlog.Debug("Worker: Job has been canceled via CancellationWatcher", mlog.String("worker", worker.name), mlog.String("job_id", job.Id)) + job.Logger.Debug("Worker: Job has been canceled via CancellationWatcher") worker.setJobCanceled(job) return case <-worker.stop: - mlog.Debug("Worker: Job has been canceled via Worker Stop", mlog.String("worker", worker.name), mlog.String("job_id", job.Id)) + job.Logger.Debug("Worker: Job has been canceled via Worker Stop") worker.setJobCanceled(job) return case <-time.After(TimeBetweenBatches * time.Millisecond): done, progress, err := worker.runMigration(job.Data[JobDataKeyMigration], job.Data[JobDataKeyMigrationLastDone]) if err != nil { - mlog.Error("Worker: Failed to run migration", mlog.String("worker", worker.name), mlog.String("job_id", job.Id), mlog.String("error", err.Error())) + job.Logger.Error("Worker: Failed to run migration", mlog.Err(err)) worker.setJobError(job, err) return } else if done { - mlog.Info("Worker: Job is complete", mlog.String("worker", worker.name), mlog.String("job_id", job.Id)) + job.Logger.Info("Worker: Job is complete") worker.setJobSuccess(job) return } else { job.Data[JobDataKeyMigrationLastDone] = progress if err := worker.jobServer.UpdateInProgressJobData(job); err != nil { - mlog.Error("Worker: Failed to update migration status data for job", mlog.String("worker", worker.name), mlog.String("job_id", job.Id), mlog.String("error", err.Error())) + job.Logger.Error("Worker: Failed to update migration status data for job", mlog.Err(err)) worker.setJobError(job, err) return } @@ -139,20 +143,20 @@ func (worker *Worker) DoJob(job *model.Job) { func (worker *Worker) setJobSuccess(job *model.Job) { if err := worker.jobServer.SetJobSuccess(job); err != nil { - mlog.Error("Worker: Failed to set success for job", mlog.String("worker", worker.name), mlog.String("job_id", job.Id), mlog.String("error", err.Error())) + job.Logger.Error("Worker: Failed to set success for job", mlog.Err(err)) worker.setJobError(job, err) } } func (worker *Worker) setJobError(job *model.Job, appError *model.AppError) { if err := worker.jobServer.SetJobError(job, appError); err != nil { - mlog.Error("Worker: Failed to set job error", mlog.String("worker", worker.name), mlog.String("job_id", job.Id), mlog.String("error", err.Error())) + job.Logger.Error("Worker: Failed to set job error", mlog.Err(err)) } } func (worker *Worker) setJobCanceled(job *model.Job) { if err := worker.jobServer.SetJobCanceled(job); err != nil { - mlog.Error("Worker: Failed to mark job as canceled", mlog.String("worker", worker.name), mlog.String("job_id", job.Id), mlog.String("error", err.Error())) + job.Logger.Error("Worker: Failed to mark job as canceled", mlog.Err(err)) } } diff --git a/server/channels/jobs/notify_admin/install_plugin_scheduler.go 
b/server/channels/jobs/notify_admin/install_plugin_scheduler.go index 55c88bc89d..32c3af4203 100644 --- a/server/channels/jobs/notify_admin/install_plugin_scheduler.go +++ b/server/channels/jobs/notify_admin/install_plugin_scheduler.go @@ -14,7 +14,7 @@ import ( const installPluginSchedFreq = 24 * time.Hour -func MakeInstallPluginScheduler(jobServer *jobs.JobServer, license *model.License, jobType string) model.Scheduler { +func MakeInstallPluginScheduler(jobServer *jobs.JobServer, license *model.License, jobType string) *jobs.PeriodicScheduler { isEnabled := func(cfg *model.Config) bool { enabled := jobType == model.JobTypeInstallPluginNotifyAdmin mlog.Debug("Scheduler: isEnabled: "+strconv.FormatBool(enabled), mlog.String("scheduler", jobType)) diff --git a/server/channels/jobs/notify_admin/scheduler.go b/server/channels/jobs/notify_admin/scheduler.go index 3642ae5cbd..30a7698e09 100644 --- a/server/channels/jobs/notify_admin/scheduler.go +++ b/server/channels/jobs/notify_admin/scheduler.go @@ -14,7 +14,7 @@ import ( const schedFreq = 24 * time.Hour -func MakeScheduler(jobServer *jobs.JobServer, license *model.License, jobType string) model.Scheduler { +func MakeScheduler(jobServer *jobs.JobServer, license *model.License, jobType string) *jobs.PeriodicScheduler { isEnabled := func(cfg *model.Config) bool { enabled := license != nil && *license.Features.Cloud mlog.Debug("Scheduler: isEnabled: "+strconv.FormatBool(enabled), mlog.String("scheduler", jobType)) diff --git a/server/channels/jobs/notify_admin/worker.go b/server/channels/jobs/notify_admin/worker.go index d73024e7a6..8d5d510c2e 100644 --- a/server/channels/jobs/notify_admin/worker.go +++ b/server/channels/jobs/notify_admin/worker.go @@ -18,7 +18,7 @@ type AppIface interface { DoCheckForAdminNotifications(trial bool) *model.AppError } -func MakeUpgradeNotifyWorker(jobServer *jobs.JobServer, license *model.License, app AppIface) model.Worker { +func MakeUpgradeNotifyWorker(jobServer *jobs.JobServer, license *model.License, app AppIface) *jobs.SimpleWorker { isEnabled := func(_ *model.Config) bool { return license != nil && license.Features != nil && *license.Features.Cloud } @@ -36,7 +36,7 @@ func MakeUpgradeNotifyWorker(jobServer *jobs.JobServer, license *model.License, return worker } -func MakeTrialNotifyWorker(jobServer *jobs.JobServer, license *model.License, app AppIface) model.Worker { +func MakeTrialNotifyWorker(jobServer *jobs.JobServer, license *model.License, app AppIface) *jobs.SimpleWorker { isEnabled := func(_ *model.Config) bool { return license != nil && license.Features != nil && *license.Features.Cloud } @@ -54,7 +54,7 @@ func MakeTrialNotifyWorker(jobServer *jobs.JobServer, license *model.License, ap return worker } -func MakeInstallPluginNotifyWorker(jobServer *jobs.JobServer, app AppIface) model.Worker { +func MakeInstallPluginNotifyWorker(jobServer *jobs.JobServer, app AppIface) *jobs.SimpleWorker { isEnabled := func(_ *model.Config) bool { return true } diff --git a/server/channels/jobs/plugins/scheduler.go b/server/channels/jobs/plugins/scheduler.go index bb52cfb988..8388f2a464 100644 --- a/server/channels/jobs/plugins/scheduler.go +++ b/server/channels/jobs/plugins/scheduler.go @@ -12,7 +12,7 @@ import ( const schedFreq = 24 * time.Hour -func MakeScheduler(jobServer *jobs.JobServer) model.Scheduler { +func MakeScheduler(jobServer *jobs.JobServer) *jobs.PeriodicScheduler { isEnabled := func(cfg *model.Config) bool { return true } diff --git a/server/channels/jobs/plugins/worker.go 
b/server/channels/jobs/plugins/worker.go index 96fb46ef22..34630fd209 100644 --- a/server/channels/jobs/plugins/worker.go +++ b/server/channels/jobs/plugins/worker.go @@ -19,16 +19,19 @@ type Worker struct { stopped chan bool jobs chan model.Job jobServer *jobs.JobServer + logger mlog.LoggerIFace app AppIface } -func MakeWorker(jobServer *jobs.JobServer, app AppIface) model.Worker { +func MakeWorker(jobServer *jobs.JobServer, app AppIface) *Worker { + const workerName = "Plugins" worker := Worker{ - name: "Plugins", + name: workerName, stop: make(chan bool, 1), stopped: make(chan bool, 1), jobs: make(chan model.Job), jobServer: jobServer, + logger: jobServer.Logger().With(mlog.String("workername", workerName)), app: app, } @@ -36,27 +39,29 @@ func MakeWorker(jobServer *jobs.JobServer, app AppIface) model.Worker { } func (worker *Worker) Run() { - mlog.Debug("Worker started", mlog.String("worker", worker.name)) + worker.logger.Debug("Worker started") defer func() { - mlog.Debug("Worker finished", mlog.String("worker", worker.name)) + worker.logger.Debug("Worker finished") worker.stopped <- true }() for { select { case <-worker.stop: - mlog.Debug("Worker received stop signal", mlog.String("worker", worker.name)) + worker.logger.Debug("Worker received stop signal") return case job := <-worker.jobs: - mlog.Debug("Worker received a new candidate job.", mlog.String("worker", worker.name)) + job.Logger = job.Logger.With(mlog.String("workername", worker.name)) + + job.Logger.Debug("Worker received a new candidate job") worker.DoJob(&job) } } } func (worker *Worker) Stop() { - mlog.Debug("Worker stopping", mlog.String("worker", worker.name)) + worker.logger.Debug("Worker stopping") worker.stop <- true <-worker.stopped } @@ -71,34 +76,31 @@ func (worker *Worker) IsEnabled(cfg *model.Config) bool { func (worker *Worker) DoJob(job *model.Job) { if claimed, err := worker.jobServer.ClaimJob(job); err != nil { - mlog.Info("Worker experienced an error while trying to claim job", - mlog.String("worker", worker.name), - mlog.String("job_id", job.Id), - mlog.String("error", err.Error())) + job.Logger.Info("Worker experienced an error while trying to claim job", mlog.Err(err)) return } else if !claimed { return } if err := worker.app.DeleteAllExpiredPluginKeys(); err != nil { - mlog.Error("Worker: Failed to delete expired keys", mlog.String("worker", worker.name), mlog.String("job_id", job.Id), mlog.String("error", err.Error())) + job.Logger.Error("Worker: Failed to delete expired keys", mlog.Err(err)) worker.setJobError(job, err) return } - mlog.Info("Worker: Job is complete", mlog.String("worker", worker.name), mlog.String("job_id", job.Id)) + job.Logger.Info("Worker: Job is complete") worker.setJobSuccess(job) } func (worker *Worker) setJobSuccess(job *model.Job) { if err := worker.jobServer.SetJobSuccess(job); err != nil { - mlog.Error("Worker: Failed to set success for job", mlog.String("worker", worker.name), mlog.String("job_id", job.Id), mlog.String("error", err.Error())) + job.Logger.Error("Worker: Failed to set success for job", mlog.Err(err)) worker.setJobError(job, err) } } func (worker *Worker) setJobError(job *model.Job, appError *model.AppError) { if err := worker.jobServer.SetJobError(job, appError); err != nil { - mlog.Error("Worker: Failed to set job error", mlog.String("worker", worker.name), mlog.String("job_id", job.Id), mlog.String("error", err.Error())) + job.Logger.Error("Worker: Failed to set job error", mlog.Err(err)) } } diff --git 
a/server/channels/jobs/post_persistent_notifications/scheduler.go b/server/channels/jobs/post_persistent_notifications/scheduler.go index 0458a3e5e2..43b1b19f84 100644 --- a/server/channels/jobs/post_persistent_notifications/scheduler.go +++ b/server/channels/jobs/post_persistent_notifications/scheduler.go @@ -19,7 +19,7 @@ func (scheduler *Scheduler) NextScheduleTime(cfg *model.Config, _ time.Time, _ b return &nextTime } -func MakeScheduler(jobServer *jobs.JobServer, licenseFunc func() *model.License) model.Scheduler { +func MakeScheduler(jobServer *jobs.JobServer, licenseFunc func() *model.License) *Scheduler { enabledFunc := func(_ *model.Config) bool { l := licenseFunc() return l != nil && (l.SkuShortName == model.LicenseShortSkuProfessional || l.SkuShortName == model.LicenseShortSkuEnterprise) diff --git a/server/channels/jobs/post_persistent_notifications/worker.go b/server/channels/jobs/post_persistent_notifications/worker.go index ca84b0f07a..d5e279ca43 100644 --- a/server/channels/jobs/post_persistent_notifications/worker.go +++ b/server/channels/jobs/post_persistent_notifications/worker.go @@ -8,16 +8,14 @@ import ( "github.com/mattermost/mattermost/server/v8/channels/jobs" ) -const ( - JobName = "PostPersistentNotifications" -) - type AppIface interface { SendPersistentNotifications() error IsPersistentNotificationsEnabled() bool } -func MakeWorker(jobServer *jobs.JobServer, app AppIface) model.Worker { +func MakeWorker(jobServer *jobs.JobServer, app AppIface) *jobs.SimpleWorker { + const workerName = "PostPersistentNotifications" + isEnabled := func(_ *model.Config) bool { return app.IsPersistentNotificationsEnabled() } @@ -25,6 +23,6 @@ func MakeWorker(jobServer *jobs.JobServer, app AppIface) model.Worker { defer jobServer.HandleJobPanic(job) return app.SendPersistentNotifications() } - worker := jobs.NewSimpleWorker(JobName, jobServer, execute, isEnabled) + worker := jobs.NewSimpleWorker(workerName, jobServer, execute, isEnabled) return worker } diff --git a/server/channels/jobs/product_notices/scheduler.go b/server/channels/jobs/product_notices/scheduler.go index 77aa01e8c8..f7160ded48 100644 --- a/server/channels/jobs/product_notices/scheduler.go +++ b/server/channels/jobs/product_notices/scheduler.go @@ -19,7 +19,7 @@ func (scheduler *Scheduler) NextScheduleTime(cfg *model.Config, _ time.Time, pen return &nextTime } -func MakeScheduler(jobServer *jobs.JobServer) model.Scheduler { +func MakeScheduler(jobServer *jobs.JobServer) *Scheduler { isEnabled := func(cfg *model.Config) bool { return *cfg.AnnouncementSettings.AdminNoticesEnabled || *cfg.AnnouncementSettings.UserNoticesEnabled } diff --git a/server/channels/jobs/product_notices/worker.go b/server/channels/jobs/product_notices/worker.go index af1ae15a70..c6a3379df1 100644 --- a/server/channels/jobs/product_notices/worker.go +++ b/server/channels/jobs/product_notices/worker.go @@ -9,13 +9,13 @@ import ( "github.com/mattermost/mattermost/server/v8/channels/jobs" ) -const jobName = "ProductNotices" - type AppIface interface { UpdateProductNotices() *model.AppError } -func MakeWorker(jobServer *jobs.JobServer, app AppIface) model.Worker { +func MakeWorker(jobServer *jobs.JobServer, app AppIface) *jobs.SimpleWorker { + const workerName = "ProductNotices" + isEnabled := func(cfg *model.Config) bool { return *cfg.AnnouncementSettings.AdminNoticesEnabled || *cfg.AnnouncementSettings.UserNoticesEnabled } @@ -23,11 +23,11 @@ func MakeWorker(jobServer *jobs.JobServer, app AppIface) model.Worker { defer 
jobServer.HandleJobPanic(job) if err := app.UpdateProductNotices(); err != nil { - mlog.Error("Worker: Failed to fetch product notices", mlog.String("worker", model.JobTypeProductNotices), mlog.String("job_id", job.Id), mlog.Err(err)) + job.Logger.Error("Worker: Failed to fetch product notices", mlog.Err(err)) return err } return nil } - worker := jobs.NewSimpleWorker(jobName, jobServer, execute, isEnabled) + worker := jobs.NewSimpleWorker(workerName, jobServer, execute, isEnabled) return worker } diff --git a/server/channels/jobs/resend_invitation_email/worker.go b/server/channels/jobs/resend_invitation_email/worker.go index 7946ea955a..d87ce925b4 100644 --- a/server/channels/jobs/resend_invitation_email/worker.go +++ b/server/channels/jobs/resend_invitation_email/worker.go @@ -31,18 +31,21 @@ type ResendInvitationEmailWorker struct { stopped chan bool jobs chan model.Job jobServer *jobs.JobServer + logger mlog.LoggerIFace app AppIface store store.Store telemetryService *telemetry.TelemetryService } -func MakeWorker(jobServer *jobs.JobServer, app AppIface, store store.Store, telemetryService *telemetry.TelemetryService) model.Worker { +func MakeWorker(jobServer *jobs.JobServer, app AppIface, store store.Store, telemetryService *telemetry.TelemetryService) *ResendInvitationEmailWorker { + const workerName = "ResendInvitationEmail" worker := ResendInvitationEmailWorker{ - name: model.JobTypeResendInvitationEmail, + name: workerName, stop: make(chan bool, 1), stopped: make(chan bool, 1), jobs: make(chan model.Job), jobServer: jobServer, + logger: jobServer.Logger().With(mlog.String("workername", workerName)), app: app, store: store, telemetryService: telemetryService, @@ -51,20 +54,20 @@ func MakeWorker(jobServer *jobs.JobServer, app AppIface, store store.Store, tele } func (rseworker *ResendInvitationEmailWorker) Run() { - mlog.Debug("Worker started", mlog.String("worker", rseworker.name)) + rseworker.logger.Debug("Worker started") defer func() { - mlog.Debug("Worker finished", mlog.String("worker", rseworker.name)) + rseworker.logger.Debug("Worker finished") rseworker.stopped <- true }() for { select { case <-rseworker.stop: - mlog.Debug("Worker received stop signal", mlog.String("worker", rseworker.name)) + rseworker.logger.Debug("Worker received stop signal") return case job := <-rseworker.jobs: - mlog.Debug("Worker received a new candidate job.", mlog.String("worker", rseworker.name)) + job.Logger.Debug("Worker received a new candidate job") rseworker.DoJob(&job) } } @@ -75,7 +78,7 @@ func (rseworker *ResendInvitationEmailWorker) IsEnabled(cfg *model.Config) bool } func (rseworker *ResendInvitationEmailWorker) Stop() { - mlog.Debug("Worker stopping", mlog.String("worker", rseworker.name)) + rseworker.logger.Debug("Worker stopping") rseworker.stop <- true <-rseworker.stopped } @@ -96,14 +99,14 @@ func (rseworker *ResendInvitationEmailWorker) DoJob(job *model.Job) { func (rseworker *ResendInvitationEmailWorker) setJobSuccess(job *model.Job) { if err := rseworker.jobServer.SetJobSuccess(job); err != nil { - mlog.Error("Worker: Failed to set success for job", mlog.String("worker", rseworker.name), mlog.String("job_id", job.Id), mlog.String("error", err.Error())) + job.Logger.Error("Worker: Failed to set success for job", mlog.Err(err)) rseworker.setJobError(job, err) } } func (rseworker *ResendInvitationEmailWorker) setJobError(job *model.Job, appError *model.AppError) { if err := rseworker.jobServer.SetJobError(job, appError); err != nil { - mlog.Error("Worker: Failed to set job error", 
mlog.String("worker", rseworker.name), mlog.String("job_id", job.Id), mlog.String("error", err.Error())) + job.Logger.Error("Worker: Failed to set job error", mlog.Err(err)) } } @@ -179,14 +182,14 @@ func (rseworker *ResendInvitationEmailWorker) ResendEmails(job *model.Job, inter emailList, err := rseworker.cleanEmailData(emailListData) if err != nil { appErr := model.NewAppError("worker: "+rseworker.name, "job_id: "+job.Id, nil, "", http.StatusInternalServerError).Wrap(err) - mlog.Error("Worker: Failed to clean emails string data", mlog.String("worker", rseworker.name), mlog.String("job_id", job.Id), mlog.String("error", appErr.Error())) + job.Logger.Error("Worker: Failed to clean emails string data", mlog.Err(appErr)) rseworker.setJobError(job, appErr) } channelList, err := rseworker.cleanChannelsData(channelListData) if err != nil { appErr := model.NewAppError("worker: "+rseworker.name, "job_id: "+job.Id, nil, "", http.StatusInternalServerError).Wrap(err) - mlog.Error("Worker: Failed to clean channel string data", mlog.String("worker", rseworker.name), mlog.String("job_id", job.Id), mlog.String("error", appErr.Error())) + job.Logger.Error("Worker: Failed to clean channel string data", mlog.Err(appErr)) rseworker.setJobError(job, appErr) } @@ -202,7 +205,7 @@ func (rseworker *ResendInvitationEmailWorker) ResendEmails(job *model.Job, inter _, appErr := rseworker.app.InviteNewUsersToTeamGracefully(&memberInvite, teamID, job.Data["senderID"], interval) if appErr != nil { - mlog.Error("Worker: Failed to send emails", mlog.String("worker", rseworker.name), mlog.String("job_id", job.Id), mlog.String("error", appErr.Error())) + job.Logger.Error("Worker: Failed to send emails", mlog.Err(appErr)) rseworker.setJobError(job, appErr) } rseworker.telemetryService.SendTelemetry("track_invite_email_resend", map[string]any{interval: interval}) diff --git a/server/channels/jobs/s3_path_migration/s3_path_migration.go b/server/channels/jobs/s3_path_migration/s3_path_migration.go index 203c379726..dcfde2012d 100644 --- a/server/channels/jobs/s3_path_migration/s3_path_migration.go +++ b/server/channels/jobs/s3_path_migration/s3_path_migration.go @@ -12,20 +12,20 @@ import ( "github.com/mattermost/mattermost/server/public/model" "github.com/mattermost/mattermost/server/public/shared/mlog" + "github.com/mattermost/mattermost/server/public/shared/request" "github.com/mattermost/mattermost/server/v8/channels/jobs" "github.com/mattermost/mattermost/server/v8/channels/store" "github.com/mattermost/mattermost/server/v8/platform/shared/filestore" ) const ( - JobName = "S3PathMigration" - timeBetweenBatches = 1 * time.Second ) type S3PathMigrationWorker struct { name string jobServer *jobs.JobServer + logger mlog.LoggerIFace store store.Store fileBackend *filestore.S3FileBackend @@ -34,15 +34,17 @@ type S3PathMigrationWorker struct { jobs chan model.Job } -func MakeWorker(jobServer *jobs.JobServer, store store.Store, fileBackend filestore.FileBackend) model.Worker { +func MakeWorker(jobServer *jobs.JobServer, store store.Store, fileBackend filestore.FileBackend) *S3PathMigrationWorker { // If the type cast fails, it will be nil // which is checked later. 
s3Backend, _ := fileBackend.(*filestore.S3FileBackend) + const workerName = "S3PathMigration" worker := &S3PathMigrationWorker{ + name: workerName, jobServer: jobServer, + logger: jobServer.Logger().With(mlog.String("workername", workerName)), store: store, fileBackend: s3Backend, - name: JobName, stop: make(chan bool, 1), stopped: make(chan bool, 1), jobs: make(chan model.Job), @@ -51,30 +53,32 @@ func MakeWorker(jobServer *jobs.JobServer, store store.Store, fileBackend filest } func (worker *S3PathMigrationWorker) Run() { - mlog.Debug("Worker started", mlog.String("worker", worker.name)) + worker.logger.Debug("Worker started") // We have to re-assign the stop channel again, because // it might happen that the job was restarted due to a config change. worker.stop = make(chan bool, 1) defer func() { - mlog.Debug("Worker finished", mlog.String("worker", worker.name)) + worker.logger.Debug("Worker finished") worker.stopped <- true }() for { select { case <-worker.stop: - mlog.Debug("Worker received stop signal", mlog.String("worker", worker.name)) + worker.logger.Debug("Worker received stop signal") return case job := <-worker.jobs: - mlog.Debug("Worker received a new candidate job.", mlog.String("worker", worker.name)) + job.Logger = job.Logger.With(mlog.String("workername", worker.name)) + + job.Logger.Debug("Worker received a new candidate job") worker.DoJob(&job) } } } func (worker *S3PathMigrationWorker) Stop() { - mlog.Debug("Worker stopping", mlog.String("worker", worker.name)) + worker.logger.Debug("Worker stopping") close(worker.stop) <-worker.stopped } @@ -104,10 +108,7 @@ func (worker *S3PathMigrationWorker) DoJob(job *model.Job) { defer worker.jobServer.HandleJobPanic(job) if claimed, err := worker.jobServer.ClaimJob(job); err != nil { - mlog.Warn("S3PathMigrationWorker experienced an error while trying to claim job", - mlog.String("worker", worker.name), - mlog.String("job_id", job.Id), - mlog.Err(err)) + job.Logger.Warn("S3PathMigrationWorker experienced an error while trying to claim job", mlog.Err(err)) return } else if !claimed { return @@ -115,16 +116,18 @@ func (worker *S3PathMigrationWorker) DoJob(job *model.Job) { if worker.fileBackend == nil { err := errors.New("no S3 file backend found") - mlog.Error("S3PathMigrationWorker: ", mlog.Err(err)) + job.Logger.Error("S3PathMigrationWorker: ", mlog.Err(err)) worker.setJobError(job, model.NewAppError("DoJob", model.NoTranslation, nil, "", http.StatusInternalServerError).Wrap(err)) return } + c := request.EmptyContext(worker.logger) + var appErr *model.AppError // We get the job again because ClaimJob changes the job status. 
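Illustrative note, not part of the patch: the change just below passes the new request.Context into GetJob so the store can attach a logger to the re-fetched job (the store side appears later in this diff as job.InitLogger(c.Logger())), and Run() additionally tags job.Logger with the worker name. A minimal sketch of the resulting flow, using only identifiers that appear in this diff; refetchWithLogger is a hypothetical helper name:

	// Sketch only: re-fetch a claimed job so it carries an initialized, named logger.
	func refetchWithLogger(worker *S3PathMigrationWorker, job *model.Job) (*model.Job, *model.AppError) {
		// request.EmptyContext wraps the worker's logger; the store's Get calls
		// job.InitLogger(c.Logger()) before returning the job.
		c := request.EmptyContext(worker.logger)
		job, appErr := worker.jobServer.GetJob(c, job.Id)
		if appErr != nil {
			return nil, appErr
		}
		// Mirror Run(): attach the worker identity so every job log line carries it.
		job.Logger = job.Logger.With(mlog.String("workername", worker.name))
		return job, nil
	}

DoJob itself keeps the claim/re-fetch inline; the sketch only groups the same calls to show where the per-job logger comes from.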
- job, appErr = worker.jobServer.GetJob(job.Id) + job, appErr = worker.jobServer.GetJob(c, job.Id) if appErr != nil { - mlog.Error("S3PathMigrationWorker: job execution error", mlog.String("worker", worker.name), mlog.String("job_id", job.Id), mlog.Err(appErr)) + job.Logger.Error("S3PathMigrationWorker: job execution error", mlog.Err(appErr)) worker.setJobError(job, appErr) return } @@ -135,14 +138,14 @@ func (worker *S3PathMigrationWorker) DoJob(job *model.Job) { doneCount, appErr := worker.getJobMetadata(job, "done_file_count") if appErr != nil { - mlog.Error("S3PathMigrationWorker: failed to get done file count", mlog.String("worker", worker.name), mlog.String("job_id", job.Id), mlog.Err(appErr)) + job.Logger.Error("S3PathMigrationWorker: failed to get done file count", mlog.Err(appErr)) worker.setJobError(job, appErr) return } startTime, appErr := worker.getJobMetadata(job, "start_create_at") if appErr != nil { - mlog.Error("S3PathMigrationWorker: failed to get start create_at", mlog.String("worker", worker.name), mlog.String("job_id", job.Id), mlog.Err(appErr)) + job.Logger.Error("S3PathMigrationWorker: failed to get start create_at", mlog.Err(appErr)) worker.setJobError(job, appErr) return } @@ -158,14 +161,9 @@ func (worker *S3PathMigrationWorker) DoJob(job *model.Job) { for { select { case <-worker.stop: - mlog.Info("Worker: S3 Migration has been canceled via Worker Stop. Setting the job back to pending.", - mlog.String("workername", worker.name), - mlog.String("job_id", job.Id)) + job.Logger.Info("Worker: S3 Migration has been canceled via Worker Stop. Setting the job back to pending.") if err := worker.jobServer.SetJobPending(job); err != nil { - mlog.Error("Worker: Failed to mark job as pending", - mlog.String("workername", worker.name), - mlog.String("job_id", job.Id), - mlog.Err(err)) + worker.logger.Error("Worker: Failed to mark job as pending", mlog.Err(err)) } return case <-time.After(timeBetweenBatches): @@ -177,11 +175,11 @@ func (worker *S3PathMigrationWorker) DoJob(job *model.Job) { files, err = worker.store.FileInfo().GetFilesBatchForIndexing(int64(startTime), startFileID, true, pageSize) if err != nil { if tries > 3 { - mlog.Error("Worker: Failed to get files after multiple retries. Exiting") + job.Logger.Error("Worker: Failed to get files after multiple retries. Exiting") worker.setJobError(job, model.NewAppError("DoJob", model.NoTranslation, nil, "", http.StatusInternalServerError).Wrap(err)) return } - mlog.Warn("Failed to get file info for s3 migration. Retrying .. ", mlog.Err(err)) + job.Logger.Warn("Failed to get file info for s3 migration. Retrying .. ", mlog.Err(err)) // Wait a bit before trying again. time.Sleep(15 * time.Second) @@ -191,29 +189,29 @@ func (worker *S3PathMigrationWorker) DoJob(job *model.Job) { } if len(files) == 0 { - mlog.Info("S3PathMigrationWorker: Job is complete", mlog.String("worker", worker.name), mlog.String("job_id", job.Id)) + job.Logger.Info("S3PathMigrationWorker: Job is complete") worker.setJobSuccess(job) - worker.markAsComplete() + worker.markAsComplete(job) return } // Iterate through the rows in each page. for _, f := range files { - mlog.Debug("Processing file ID", mlog.String("id", f.Id), mlog.String("worker", worker.name), mlog.String("job_id", job.Id)) + job.Logger.Debug("Processing file ID", mlog.String("id", f.Id)) // We do not fail the job if a single image failed to encode. 
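Illustrative note, not part of the patch: the loop that follows applies the warn-and-continue policy per stored path (Path, PreviewPath, ThumbnailPath), now logging through job.Logger instead of the package-level mlog. A compact sketch of the same policy; decodePaths is a hypothetical helper, while DecodeFilePathIfNeeded and the FileInfo fields are the ones used below:

	// Sketch only: a single undecodable path is logged and skipped, never failing the job.
	func decodePaths(backend *filestore.S3FileBackend, logger mlog.LoggerIFace, f *model.FileInfo) {
		for _, p := range []string{f.Path, f.PreviewPath, f.ThumbnailPath} {
			if p == "" {
				continue
			}
			if err := backend.DecodeFilePathIfNeeded(p); err != nil {
				logger.Warn("Failed to encode S3 file path", mlog.String("path", p), mlog.String("id", f.Id), mlog.Err(err))
			}
		}
	}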
if f.Path != "" { if err := worker.fileBackend.DecodeFilePathIfNeeded(f.Path); err != nil { - mlog.Warn("Failed to encode S3 file path", mlog.String("path", f.Path), mlog.String("id", f.Id), mlog.Err(err)) + job.Logger.Warn("Failed to encode S3 file path", mlog.String("path", f.Path), mlog.String("id", f.Id), mlog.Err(err)) } } if f.PreviewPath != "" { if err := worker.fileBackend.DecodeFilePathIfNeeded(f.PreviewPath); err != nil { - mlog.Warn("Failed to encode S3 file path", mlog.String("path", f.PreviewPath), mlog.String("id", f.Id), mlog.Err(err)) + job.Logger.Warn("Failed to encode S3 file path", mlog.String("path", f.PreviewPath), mlog.String("id", f.Id), mlog.Err(err)) } } if f.ThumbnailPath != "" { if err := worker.fileBackend.DecodeFilePathIfNeeded(f.ThumbnailPath); err != nil { - mlog.Warn("Failed to encode S3 file path", mlog.String("path", f.ThumbnailPath), mlog.String("id", f.Id), mlog.Err(err)) + job.Logger.Warn("Failed to encode S3 file path", mlog.String("path", f.ThumbnailPath), mlog.String("id", f.Id), mlog.Err(err)) } } } @@ -234,7 +232,7 @@ func (worker *S3PathMigrationWorker) DoJob(job *model.Job) { } } -func (worker *S3PathMigrationWorker) markAsComplete() { +func (worker *S3PathMigrationWorker) markAsComplete(job *model.Job) { system := model.System{ Name: model.MigrationKeyS3Path, Value: "true", @@ -245,24 +243,24 @@ func (worker *S3PathMigrationWorker) markAsComplete() { // it will just fall through everything because all files would have // converted. The actual job is idempotent, so there won't be a problem. if err := worker.jobServer.Store.System().Save(&system); err != nil { - mlog.Error("Worker: Failed to mark s3 path migration as completed in the systems table.", mlog.String("workername", worker.name), mlog.Err(err)) + job.Logger.Error("Worker: Failed to mark s3 path migration as completed in the systems table.", mlog.Err(err)) } } func (worker *S3PathMigrationWorker) setJobSuccess(job *model.Job) { if err := worker.jobServer.SetJobProgress(job, 100); err != nil { - mlog.Error("Worker: Failed to update progress for job", mlog.String("worker", worker.name), mlog.String("job_id", job.Id), mlog.Err(err)) + job.Logger.Error("Worker: Failed to update progress for job", mlog.Err(err)) worker.setJobError(job, err) } if err := worker.jobServer.SetJobSuccess(job); err != nil { - mlog.Error("S3PathMigrationWorker: Failed to set success for job", mlog.String("worker", worker.name), mlog.String("job_id", job.Id), mlog.Err(err)) + job.Logger.Error("S3PathMigrationWorker: Failed to set success for job", mlog.Err(err)) worker.setJobError(job, err) } } func (worker *S3PathMigrationWorker) setJobError(job *model.Job, appError *model.AppError) { if err := worker.jobServer.SetJobError(job, appError); err != nil { - mlog.Error("S3PathMigrationWorker: Failed to set job error", mlog.String("worker", worker.name), mlog.String("job_id", job.Id), mlog.Err(err)) + job.Logger.Error("S3PathMigrationWorker: Failed to set job error", mlog.Err(err)) } } diff --git a/server/channels/jobs/schedulers.go b/server/channels/jobs/schedulers.go index ce718a0a61..0bfff09fa5 100644 --- a/server/channels/jobs/schedulers.go +++ b/server/channels/jobs/schedulers.go @@ -10,8 +10,15 @@ import ( "github.com/mattermost/mattermost/server/public/model" "github.com/mattermost/mattermost/server/public/shared/mlog" + "github.com/mattermost/mattermost/server/public/shared/request" ) +type Scheduler interface { + Enabled(cfg *model.Config) bool + NextScheduleTime(cfg *model.Config, now time.Time, pendingJobs bool, 
lastSuccessfulJob *model.Job) *time.Time + ScheduleJob(c *request.Context, cfg *model.Config, pendingJobs bool, lastSuccessfulJob *model.Job) (*model.Job, *model.AppError) +} + type Schedulers struct { stop chan bool stopped chan bool @@ -22,7 +29,7 @@ type Schedulers struct { isLeader bool running bool - schedulers map[string]model.Scheduler + schedulers map[string]Scheduler nextRunTimes map[string]*time.Time } @@ -32,7 +39,7 @@ var ( ErrSchedulersUninitialized = errors.New("job schedulers are not initialized") ) -func (schedulers *Schedulers) AddScheduler(name string, scheduler model.Scheduler) { +func (schedulers *Schedulers) AddScheduler(name string, scheduler Scheduler) { schedulers.schedulers[name] = scheduler } @@ -80,7 +87,8 @@ func (schedulers *Schedulers) Start() { if scheduler == nil || !schedulers.isLeader || !scheduler.Enabled(cfg) { continue } - if _, err := schedulers.scheduleJob(cfg, name, scheduler); err != nil { + c := request.EmptyContext(schedulers.jobs.Logger()) + if _, err := schedulers.scheduleJob(c, cfg, name, scheduler); err != nil { mlog.Error("Failed to schedule job", mlog.String("scheduler", name), mlog.Err(err)) continue } @@ -147,7 +155,7 @@ func (schedulers *Schedulers) setNextRunTime(cfg *model.Config, name string, now mlog.Debug("Next run time for scheduler", mlog.String("scheduler_name", name), mlog.String("next_runtime", fmt.Sprintf("%v", schedulers.nextRunTimes[name]))) } -func (schedulers *Schedulers) scheduleJob(cfg *model.Config, name string, scheduler model.Scheduler) (*model.Job, *model.AppError) { +func (schedulers *Schedulers) scheduleJob(c *request.Context, cfg *model.Config, name string, scheduler Scheduler) (*model.Job, *model.AppError) { pendingJobs, err := schedulers.jobs.CheckForPendingJobsByType(name) if err != nil { return nil, err @@ -158,7 +166,7 @@ func (schedulers *Schedulers) scheduleJob(cfg *model.Config, name string, schedu return nil, err } - return scheduler.ScheduleJob(cfg, pendingJobs, lastSuccessfulJob) + return scheduler.ScheduleJob(c, cfg, pendingJobs, lastSuccessfulJob) } func (schedulers *Schedulers) handleConfigChange(_, newConfig *model.Config) { diff --git a/server/channels/jobs/schedulers_test.go b/server/channels/jobs/schedulers_test.go index 0935682937..b813b0af6d 100644 --- a/server/channels/jobs/schedulers_test.go +++ b/server/channels/jobs/schedulers_test.go @@ -12,6 +12,7 @@ import ( "github.com/mattermost/mattermost/server/public/model" "github.com/mattermost/mattermost/server/public/plugin/plugintest/mock" + "github.com/mattermost/mattermost/server/public/shared/request" "github.com/mattermost/mattermost/server/v8/channels/store/storetest" "github.com/mattermost/mattermost/server/v8/channels/utils/testutils" ) @@ -29,7 +30,7 @@ func (scheduler *MockScheduler) NextScheduleTime(cfg *model.Config, now time.Tim return &nextTime } -func (scheduler *MockScheduler) ScheduleJob(cfg *model.Config, pendingJobs bool, lastSuccessfulJob *model.Job) (*model.Job, *model.AppError) { +func (scheduler *MockScheduler) ScheduleJob(c *request.Context, cfg *model.Config, pendingJobs bool, lastSuccessfulJob *model.Job) (*model.Job, *model.AppError) { return nil, nil } diff --git a/server/channels/jobs/server.go b/server/channels/jobs/server.go index 1f80cc5038..e87763319d 100644 --- a/server/channels/jobs/server.go +++ b/server/channels/jobs/server.go @@ -8,6 +8,7 @@ import ( "time" "github.com/mattermost/mattermost/server/public/model" + "github.com/mattermost/mattermost/server/public/shared/mlog" 
"github.com/mattermost/mattermost/server/v8/channels/store" "github.com/mattermost/mattermost/server/v8/einterfaces" "github.com/mattermost/mattermost/server/v8/platform/services/configservice" @@ -17,6 +18,7 @@ type JobServer struct { ConfigService configservice.ConfigService Store store.Store metrics einterfaces.MetricsInterface + logger mlog.LoggerIFace // mut is used to protect the following fields from concurrent access. mut sync.Mutex @@ -24,11 +26,12 @@ type JobServer struct { schedulers *Schedulers } -func NewJobServer(configService configservice.ConfigService, store store.Store, metrics einterfaces.MetricsInterface) *JobServer { +func NewJobServer(configService configservice.ConfigService, store store.Store, metrics einterfaces.MetricsInterface, logger mlog.LoggerIFace) *JobServer { srv := &JobServer{ ConfigService: configService, Store: store, metrics: metrics, + logger: logger, } srv.initWorkers() srv.initSchedulers() @@ -47,7 +50,7 @@ func (srv *JobServer) initSchedulers() { clusterLeaderChanged: make(chan bool, 1), jobs: srv, isLeader: true, - schedulers: make(map[string]model.Scheduler), + schedulers: make(map[string]Scheduler), nextRunTimes: make(map[string]*time.Time), } @@ -58,7 +61,11 @@ func (srv *JobServer) Config() *model.Config { return srv.ConfigService.Config() } -func (srv *JobServer) RegisterJobType(name string, worker model.Worker, scheduler model.Scheduler) { +func (srv *JobServer) Logger() mlog.LoggerIFace { + return srv.logger +} + +func (srv *JobServer) RegisterJobType(name string, worker model.Worker, scheduler Scheduler) { srv.mut.Lock() defer srv.mut.Unlock() if worker != nil { diff --git a/server/channels/store/layer_generators/opentracing_layer.go.tmpl b/server/channels/store/layer_generators/opentracing_layer.go.tmpl index 02d7408df9..f540421a87 100644 --- a/server/channels/store/layer_generators/opentracing_layer.go.tmpl +++ b/server/channels/store/layer_generators/opentracing_layer.go.tmpl @@ -10,6 +10,7 @@ import ( "context" "github.com/mattermost/mattermost/server/public/model" + "github.com/mattermost/mattermost/server/public/shared/request" "github.com/mattermost/mattermost/server/v8/platform/services/tracing" "github.com/mattermost/mattermost/server/v8/channels/store" "github.com/opentracing/opentracing-go/ext" diff --git a/server/channels/store/layer_generators/retry_layer.go.tmpl b/server/channels/store/layer_generators/retry_layer.go.tmpl index dd8fdac7b2..bb6d69e4d8 100644 --- a/server/channels/store/layer_generators/retry_layer.go.tmpl +++ b/server/channels/store/layer_generators/retry_layer.go.tmpl @@ -10,11 +10,13 @@ import ( "context" timepkg "time" - "github.com/lib/pq" - "github.com/mattermost/mattermost/server/public/model" - "github.com/mattermost/mattermost/server/v8/channels/store" - "github.com/pkg/errors" "github.com/go-sql-driver/mysql" + "github.com/lib/pq" + "github.com/pkg/errors" + + "github.com/mattermost/mattermost/server/public/model" + "github.com/mattermost/mattermost/server/public/shared/request" + "github.com/mattermost/mattermost/server/v8/channels/store" ) const mySQLDeadlockCode = uint16(1213) diff --git a/server/channels/store/layer_generators/timer_layer.go.tmpl b/server/channels/store/layer_generators/timer_layer.go.tmpl index daef4c61da..f51c50e093 100644 --- a/server/channels/store/layer_generators/timer_layer.go.tmpl +++ b/server/channels/store/layer_generators/timer_layer.go.tmpl @@ -10,9 +10,10 @@ import ( "context" "time" - "github.com/mattermost/mattermost/server/v8/einterfaces" 
"github.com/mattermost/mattermost/server/public/model" + "github.com/mattermost/mattermost/server/public/shared/request" "github.com/mattermost/mattermost/server/v8/channels/store" + "github.com/mattermost/mattermost/server/v8/einterfaces" ) type {{.Name}} struct { diff --git a/server/channels/store/opentracinglayer/opentracinglayer.go b/server/channels/store/opentracinglayer/opentracinglayer.go index 5abf73ee37..69be23fcd5 100644 --- a/server/channels/store/opentracinglayer/opentracinglayer.go +++ b/server/channels/store/opentracinglayer/opentracinglayer.go @@ -10,6 +10,7 @@ import ( "context" "github.com/mattermost/mattermost/server/public/model" + "github.com/mattermost/mattermost/server/public/shared/request" "github.com/mattermost/mattermost/server/v8/channels/store" "github.com/mattermost/mattermost/server/v8/platform/services/tracing" "github.com/opentracing/opentracing-go/ext" @@ -4926,7 +4927,7 @@ func (s *OpenTracingLayerJobStore) Delete(id string) (string, error) { return result, err } -func (s *OpenTracingLayerJobStore) Get(id string) (*model.Job, error) { +func (s *OpenTracingLayerJobStore) Get(c *request.Context, id string) (*model.Job, error) { origCtx := s.Root.Store.Context() span, newCtx := tracing.StartSpanWithParentByContext(s.Root.Store.Context(), "JobStore.Get") s.Root.Store.SetContext(newCtx) @@ -4935,7 +4936,7 @@ func (s *OpenTracingLayerJobStore) Get(id string) (*model.Job, error) { }() defer span.Finish() - result, err := s.JobStore.Get(id) + result, err := s.JobStore.Get(c, id) if err != nil { span.LogFields(spanlog.Error(err)) ext.Error.Set(span, true) @@ -4944,7 +4945,7 @@ func (s *OpenTracingLayerJobStore) Get(id string) (*model.Job, error) { return result, err } -func (s *OpenTracingLayerJobStore) GetAllByStatus(status string) ([]*model.Job, error) { +func (s *OpenTracingLayerJobStore) GetAllByStatus(c *request.Context, status string) ([]*model.Job, error) { origCtx := s.Root.Store.Context() span, newCtx := tracing.StartSpanWithParentByContext(s.Root.Store.Context(), "JobStore.GetAllByStatus") s.Root.Store.SetContext(newCtx) @@ -4953,7 +4954,7 @@ func (s *OpenTracingLayerJobStore) GetAllByStatus(status string) ([]*model.Job, }() defer span.Finish() - result, err := s.JobStore.GetAllByStatus(status) + result, err := s.JobStore.GetAllByStatus(c, status) if err != nil { span.LogFields(spanlog.Error(err)) ext.Error.Set(span, true) @@ -4962,7 +4963,7 @@ func (s *OpenTracingLayerJobStore) GetAllByStatus(status string) ([]*model.Job, return result, err } -func (s *OpenTracingLayerJobStore) GetAllByType(jobType string) ([]*model.Job, error) { +func (s *OpenTracingLayerJobStore) GetAllByType(c *request.Context, jobType string) ([]*model.Job, error) { origCtx := s.Root.Store.Context() span, newCtx := tracing.StartSpanWithParentByContext(s.Root.Store.Context(), "JobStore.GetAllByType") s.Root.Store.SetContext(newCtx) @@ -4971,7 +4972,7 @@ func (s *OpenTracingLayerJobStore) GetAllByType(jobType string) ([]*model.Job, e }() defer span.Finish() - result, err := s.JobStore.GetAllByType(jobType) + result, err := s.JobStore.GetAllByType(c, jobType) if err != nil { span.LogFields(spanlog.Error(err)) ext.Error.Set(span, true) @@ -4980,7 +4981,7 @@ func (s *OpenTracingLayerJobStore) GetAllByType(jobType string) ([]*model.Job, e return result, err } -func (s *OpenTracingLayerJobStore) GetAllByTypeAndStatus(jobType string, status string) ([]*model.Job, error) { +func (s *OpenTracingLayerJobStore) GetAllByTypeAndStatus(c *request.Context, jobType string, status string) 
([]*model.Job, error) { origCtx := s.Root.Store.Context() span, newCtx := tracing.StartSpanWithParentByContext(s.Root.Store.Context(), "JobStore.GetAllByTypeAndStatus") s.Root.Store.SetContext(newCtx) @@ -4989,7 +4990,7 @@ func (s *OpenTracingLayerJobStore) GetAllByTypeAndStatus(jobType string, status }() defer span.Finish() - result, err := s.JobStore.GetAllByTypeAndStatus(jobType, status) + result, err := s.JobStore.GetAllByTypeAndStatus(c, jobType, status) if err != nil { span.LogFields(spanlog.Error(err)) ext.Error.Set(span, true) @@ -4998,7 +4999,7 @@ func (s *OpenTracingLayerJobStore) GetAllByTypeAndStatus(jobType string, status return result, err } -func (s *OpenTracingLayerJobStore) GetAllByTypePage(jobType string, offset int, limit int) ([]*model.Job, error) { +func (s *OpenTracingLayerJobStore) GetAllByTypePage(c *request.Context, jobType string, offset int, limit int) ([]*model.Job, error) { origCtx := s.Root.Store.Context() span, newCtx := tracing.StartSpanWithParentByContext(s.Root.Store.Context(), "JobStore.GetAllByTypePage") s.Root.Store.SetContext(newCtx) @@ -5007,7 +5008,7 @@ func (s *OpenTracingLayerJobStore) GetAllByTypePage(jobType string, offset int, }() defer span.Finish() - result, err := s.JobStore.GetAllByTypePage(jobType, offset, limit) + result, err := s.JobStore.GetAllByTypePage(c, jobType, offset, limit) if err != nil { span.LogFields(spanlog.Error(err)) ext.Error.Set(span, true) @@ -5016,7 +5017,7 @@ func (s *OpenTracingLayerJobStore) GetAllByTypePage(jobType string, offset int, return result, err } -func (s *OpenTracingLayerJobStore) GetAllByTypesPage(jobTypes []string, offset int, limit int) ([]*model.Job, error) { +func (s *OpenTracingLayerJobStore) GetAllByTypesPage(c *request.Context, jobTypes []string, offset int, limit int) ([]*model.Job, error) { origCtx := s.Root.Store.Context() span, newCtx := tracing.StartSpanWithParentByContext(s.Root.Store.Context(), "JobStore.GetAllByTypesPage") s.Root.Store.SetContext(newCtx) @@ -5025,25 +5026,7 @@ func (s *OpenTracingLayerJobStore) GetAllByTypesPage(jobTypes []string, offset i }() defer span.Finish() - result, err := s.JobStore.GetAllByTypesPage(jobTypes, offset, limit) - if err != nil { - span.LogFields(spanlog.Error(err)) - ext.Error.Set(span, true) - } - - return result, err -} - -func (s *OpenTracingLayerJobStore) GetAllPage(offset int, limit int) ([]*model.Job, error) { - origCtx := s.Root.Store.Context() - span, newCtx := tracing.StartSpanWithParentByContext(s.Root.Store.Context(), "JobStore.GetAllPage") - s.Root.Store.SetContext(newCtx) - defer func() { - s.Root.Store.SetContext(origCtx) - }() - - defer span.Finish() - result, err := s.JobStore.GetAllPage(offset, limit) + result, err := s.JobStore.GetAllByTypesPage(c, jobTypes, offset, limit) if err != nil { span.LogFields(spanlog.Error(err)) ext.Error.Set(span, true) diff --git a/server/channels/store/retrylayer/retrylayer.go b/server/channels/store/retrylayer/retrylayer.go index e3713f8289..e74f4973cb 100644 --- a/server/channels/store/retrylayer/retrylayer.go +++ b/server/channels/store/retrylayer/retrylayer.go @@ -12,9 +12,11 @@ import ( "github.com/go-sql-driver/mysql" "github.com/lib/pq" - "github.com/mattermost/mattermost/server/public/model" - "github.com/mattermost/mattermost/server/v8/channels/store" "github.com/pkg/errors" + + "github.com/mattermost/mattermost/server/public/model" + "github.com/mattermost/mattermost/server/public/shared/request" + "github.com/mattermost/mattermost/server/v8/channels/store" ) const mySQLDeadlockCode = 
uint16(1213) @@ -5561,11 +5563,11 @@ func (s *RetryLayerJobStore) Delete(id string) (string, error) { } -func (s *RetryLayerJobStore) Get(id string) (*model.Job, error) { +func (s *RetryLayerJobStore) Get(c *request.Context, id string) (*model.Job, error) { tries := 0 for { - result, err := s.JobStore.Get(id) + result, err := s.JobStore.Get(c, id) if err == nil { return result, nil } @@ -5582,11 +5584,11 @@ func (s *RetryLayerJobStore) Get(id string) (*model.Job, error) { } -func (s *RetryLayerJobStore) GetAllByStatus(status string) ([]*model.Job, error) { +func (s *RetryLayerJobStore) GetAllByStatus(c *request.Context, status string) ([]*model.Job, error) { tries := 0 for { - result, err := s.JobStore.GetAllByStatus(status) + result, err := s.JobStore.GetAllByStatus(c, status) if err == nil { return result, nil } @@ -5603,11 +5605,11 @@ func (s *RetryLayerJobStore) GetAllByStatus(status string) ([]*model.Job, error) } -func (s *RetryLayerJobStore) GetAllByType(jobType string) ([]*model.Job, error) { +func (s *RetryLayerJobStore) GetAllByType(c *request.Context, jobType string) ([]*model.Job, error) { tries := 0 for { - result, err := s.JobStore.GetAllByType(jobType) + result, err := s.JobStore.GetAllByType(c, jobType) if err == nil { return result, nil } @@ -5624,11 +5626,11 @@ func (s *RetryLayerJobStore) GetAllByType(jobType string) ([]*model.Job, error) } -func (s *RetryLayerJobStore) GetAllByTypeAndStatus(jobType string, status string) ([]*model.Job, error) { +func (s *RetryLayerJobStore) GetAllByTypeAndStatus(c *request.Context, jobType string, status string) ([]*model.Job, error) { tries := 0 for { - result, err := s.JobStore.GetAllByTypeAndStatus(jobType, status) + result, err := s.JobStore.GetAllByTypeAndStatus(c, jobType, status) if err == nil { return result, nil } @@ -5645,11 +5647,11 @@ func (s *RetryLayerJobStore) GetAllByTypeAndStatus(jobType string, status string } -func (s *RetryLayerJobStore) GetAllByTypePage(jobType string, offset int, limit int) ([]*model.Job, error) { +func (s *RetryLayerJobStore) GetAllByTypePage(c *request.Context, jobType string, offset int, limit int) ([]*model.Job, error) { tries := 0 for { - result, err := s.JobStore.GetAllByTypePage(jobType, offset, limit) + result, err := s.JobStore.GetAllByTypePage(c, jobType, offset, limit) if err == nil { return result, nil } @@ -5666,32 +5668,11 @@ func (s *RetryLayerJobStore) GetAllByTypePage(jobType string, offset int, limit } -func (s *RetryLayerJobStore) GetAllByTypesPage(jobTypes []string, offset int, limit int) ([]*model.Job, error) { +func (s *RetryLayerJobStore) GetAllByTypesPage(c *request.Context, jobTypes []string, offset int, limit int) ([]*model.Job, error) { tries := 0 for { - result, err := s.JobStore.GetAllByTypesPage(jobTypes, offset, limit) - if err == nil { - return result, nil - } - if !isRepeatableError(err) { - return result, err - } - tries++ - if tries >= 3 { - err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures") - return result, err - } - timepkg.Sleep(100 * timepkg.Millisecond) - } - -} - -func (s *RetryLayerJobStore) GetAllPage(offset int, limit int) ([]*model.Job, error) { - - tries := 0 - for { - result, err := s.JobStore.GetAllPage(offset, limit) + result, err := s.JobStore.GetAllByTypesPage(c, jobTypes, offset, limit) if err == nil { return result, nil } diff --git a/server/channels/store/sqlstore/job_store.go b/server/channels/store/sqlstore/job_store.go index 9b56598682..380910fde8 100644 --- a/server/channels/store/sqlstore/job_store.go 
+++ b/server/channels/store/sqlstore/job_store.go @@ -16,6 +16,7 @@ import ( "github.com/pkg/errors" "github.com/mattermost/mattermost/server/public/model" + "github.com/mattermost/mattermost/server/public/shared/request" "github.com/mattermost/mattermost/server/v8/channels/store" ) @@ -201,7 +202,7 @@ func (jss SqlJobStore) UpdateStatusOptimistically(id string, currentStatus strin return true, nil } -func (jss SqlJobStore) Get(id string) (*model.Job, error) { +func (jss SqlJobStore) Get(c *request.Context, id string) (*model.Job, error) { query, args, err := jss.getQueryBuilder(). Select("*"). From("Jobs"). @@ -209,6 +210,7 @@ func (jss SqlJobStore) Get(id string) (*model.Job, error) { if err != nil { return nil, errors.Wrap(err, "job_tosql") } + var status model.Job if err = jss.GetReplicaX().Get(&status, query, args...); err != nil { if err == sql.ErrNoRows { @@ -216,28 +218,13 @@ func (jss SqlJobStore) Get(id string) (*model.Job, error) { } return nil, errors.Wrapf(err, "failed to get Job with id=%s", id) } + + status.InitLogger(c.Logger()) + return &status, nil } -func (jss SqlJobStore) GetAllPage(offset int, limit int) ([]*model.Job, error) { - query, args, err := jss.getQueryBuilder(). - Select("*"). - From("Jobs"). - OrderBy("CreateAt DESC"). - Limit(uint64(limit)). - Offset(uint64(offset)).ToSql() - if err != nil { - return nil, errors.Wrap(err, "job_tosql") - } - - statuses := []*model.Job{} - if err = jss.GetReplicaX().Select(&statuses, query, args...); err != nil { - return nil, errors.Wrap(err, "failed to find Jobs") - } - return statuses, nil -} - -func (jss SqlJobStore) GetAllByTypesPage(jobTypes []string, offset int, limit int) ([]*model.Job, error) { +func (jss SqlJobStore) GetAllByTypesPage(c *request.Context, jobTypes []string, offset int, limit int) ([]*model.Job, error) { query, args, err := jss.getQueryBuilder(). Select("*"). From("Jobs"). @@ -253,10 +240,14 @@ func (jss SqlJobStore) GetAllByTypesPage(jobTypes []string, offset int, limit in if err = jss.GetReplicaX().Select(&jobs, query, args...); err != nil { return nil, errors.Wrapf(err, "failed to find Jobs with types") } + for _, j := range jobs { + j.InitLogger(c.Logger()) + } + return jobs, nil } -func (jss SqlJobStore) GetAllByType(jobType string) ([]*model.Job, error) { +func (jss SqlJobStore) GetAllByType(c *request.Context, jobType string) ([]*model.Job, error) { query, args, err := jss.getQueryBuilder(). Select("*"). From("Jobs"). @@ -265,14 +256,19 @@ func (jss SqlJobStore) GetAllByType(jobType string) ([]*model.Job, error) { if err != nil { return nil, errors.Wrap(err, "job_tosql") } + statuses := []*model.Job{} if err = jss.GetReplicaX().Select(&statuses, query, args...); err != nil { return nil, errors.Wrapf(err, "failed to find Jobs with type=%s", jobType) } + for _, j := range statuses { + j.InitLogger(c.Logger()) + } + return statuses, nil } -func (jss SqlJobStore) GetAllByTypeAndStatus(jobType string, status string) ([]*model.Job, error) { +func (jss SqlJobStore) GetAllByTypeAndStatus(c *request.Context, jobType string, status string) ([]*model.Job, error) { query, args, err := jss.getQueryBuilder(). Select("*"). From("Jobs"). 
@@ -281,14 +277,19 @@ func (jss SqlJobStore) GetAllByTypeAndStatus(jobType string, status string) ([]*
	if err != nil {
		return nil, errors.Wrap(err, "job_tosql")
	}
+
	jobs := []*model.Job{}
	if err = jss.GetReplicaX().Select(&jobs, query, args...); err != nil {
		return nil, errors.Wrapf(err, "failed to find Jobs with type=%s", jobType)
	}
+	for _, j := range jobs {
+		j.InitLogger(c.Logger())
+	}
+
	return jobs, nil
}
-func (jss SqlJobStore) GetAllByTypePage(jobType string, offset int, limit int) ([]*model.Job, error) {
+func (jss SqlJobStore) GetAllByTypePage(c *request.Context, jobType string, offset int, limit int) ([]*model.Job, error) {
	query, args, err := jss.getQueryBuilder().
		Select("*").
		From("Jobs").
@@ -304,10 +305,14 @@ func (jss SqlJobStore) GetAllByTypePage(jobType string, offset int, limit int) (
	if err = jss.GetReplicaX().Select(&statuses, query, args...); err != nil {
		return nil, errors.Wrapf(err, "failed to find Jobs with type=%s", jobType)
	}
+	for _, j := range statuses {
+		j.InitLogger(c.Logger())
+	}
+
	return statuses, nil
}
-func (jss SqlJobStore) GetAllByStatus(status string) ([]*model.Job, error) {
+func (jss SqlJobStore) GetAllByStatus(c *request.Context, status string) ([]*model.Job, error) {
	statuses := []*model.Job{}
	query, args, err := jss.getQueryBuilder().
		Select("*").
@@ -321,6 +326,10 @@ func (jss SqlJobStore) GetAllByStatus(status string) ([]*model.Job, error) {
	if err = jss.GetReplicaX().Select(&statuses, query, args...); err != nil {
		return nil, errors.Wrapf(err, "failed to find Jobs with status=%s", status)
	}
+	for _, j := range statuses {
+		j.InitLogger(c.Logger())
+	}
+
	return statuses, nil
}
diff --git a/server/channels/store/store.go b/server/channels/store/store.go
index 148e81cadd..a04a84ad3f 100644
--- a/server/channels/store/store.go
+++ b/server/channels/store/store.go
@@ -11,6 +11,7 @@ import (
	"time"
	"github.com/mattermost/mattermost/server/public/model"
+	"github.com/mattermost/mattermost/server/public/shared/request"
	"github.com/mattermost/mattermost/server/v8/channels/product"
)
@@ -734,13 +735,12 @@ type JobStore interface {
	UpdateOptimistically(job *model.Job, currentStatus string) (bool, error)
	UpdateStatus(id string, status string) (*model.Job, error)
	UpdateStatusOptimistically(id string, currentStatus string, newStatus string) (bool, error)
-	Get(id string) (*model.Job, error)
-	GetAllPage(offset int, limit int) ([]*model.Job, error)
-	GetAllByType(jobType string) ([]*model.Job, error)
-	GetAllByTypeAndStatus(jobType string, status string) ([]*model.Job, error)
-	GetAllByTypePage(jobType string, offset int, limit int) ([]*model.Job, error)
-	GetAllByTypesPage(jobTypes []string, offset int, limit int) ([]*model.Job, error)
-	GetAllByStatus(status string) ([]*model.Job, error)
+	Get(c *request.Context, id string) (*model.Job, error)
+	GetAllByType(c *request.Context, jobType string) ([]*model.Job, error)
+	GetAllByTypeAndStatus(c *request.Context, jobType string, status string) ([]*model.Job, error)
+	GetAllByTypePage(c *request.Context, jobType string, offset int, limit int) ([]*model.Job, error)
+	GetAllByTypesPage(c *request.Context, jobTypes []string, offset int, limit int) ([]*model.Job, error)
+	GetAllByStatus(c *request.Context, status string) ([]*model.Job, error)
	GetNewestJobByStatusAndType(status string, jobType string) (*model.Job, error)
	GetNewestJobByStatusesAndType(statuses []string, jobType string) (*model.Job, error)
	GetCountByStatusAndType(status string, jobType string) (int64, error)
diff --git
a/server/channels/store/storetest/job_store.go b/server/channels/store/storetest/job_store.go index 2f7557aca2..52b8571da5 100644 --- a/server/channels/store/storetest/job_store.go +++ b/server/channels/store/storetest/job_store.go @@ -7,15 +7,15 @@ import ( "errors" "sync" "testing" - "time" "github.com/lib/pq" + "github.com/mattermost/mattermost/server/public/model" + "github.com/mattermost/mattermost/server/public/shared/mlog" + "github.com/mattermost/mattermost/server/public/shared/request" + "github.com/mattermost/mattermost/server/v8/channels/store" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - "github.com/mattermost/mattermost/server/public/model" - "github.com/mattermost/mattermost/server/v8/channels/store" ) func TestJobStore(t *testing.T, ss store.Store) { @@ -25,7 +25,6 @@ func TestJobStore(t *testing.T, ss store.Store) { t.Run("JobGetAllByTypeAndStatus", func(t *testing.T) { testJobGetAllByTypeAndStatus(t, ss) }) t.Run("JobGetAllByTypePage", func(t *testing.T) { testJobGetAllByTypePage(t, ss) }) t.Run("JobGetAllByTypesPage", func(t *testing.T) { testJobGetAllByTypesPage(t, ss) }) - t.Run("JobGetAllPage", func(t *testing.T) { testJobGetAllPage(t, ss) }) t.Run("JobGetAllByStatus", func(t *testing.T) { testJobGetAllByStatus(t, ss) }) t.Run("GetNewestJobByStatusAndType", func(t *testing.T) { testJobStoreGetNewestJobByStatusAndType(t, ss) }) t.Run("GetNewestJobByStatusesAndType", func(t *testing.T) { testJobStoreGetNewestJobByStatusesAndType(t, ss) }) @@ -37,6 +36,8 @@ func TestJobStore(t *testing.T, ss store.Store) { } func testJobSaveGet(t *testing.T, ss store.Store) { + ctx := request.EmptyContext(mlog.CreateConsoleTestLogger(t)) + job := &model.Job{ Id: model.NewId(), Type: model.NewId(), @@ -53,7 +54,7 @@ func testJobSaveGet(t *testing.T, ss store.Store) { defer ss.Job().Delete(job.Id) - received, err := ss.Job().Get(job.Id) + received, err := ss.Job().Get(ctx, job.Id) require.NoError(t, err) require.Equal(t, job.Id, received.Id, "received incorrect job after save") require.Equal(t, "12345", received.Data["Total"]) @@ -105,6 +106,8 @@ func testJobSaveOnce(t *testing.T, ss store.Store) { } func testJobGetAllByType(t *testing.T, ss store.Store) { + ctx := request.EmptyContext(mlog.CreateConsoleTestLogger(t)) + jobType := model.NewId() jobs := []*model.Job{ @@ -128,13 +131,15 @@ func testJobGetAllByType(t *testing.T, ss store.Store) { defer ss.Job().Delete(job.Id) } - received, err := ss.Job().GetAllByType(jobType) + received, err := ss.Job().GetAllByType(ctx, jobType) require.NoError(t, err) require.Len(t, received, 2) require.ElementsMatch(t, []string{jobs[0].Id, jobs[1].Id}, []string{received[0].Id, received[1].Id}) } func testJobGetAllByTypeAndStatus(t *testing.T, ss store.Store) { + ctx := request.EmptyContext(mlog.CreateConsoleTestLogger(t)) + jobType := model.NewId() jobs := []*model.Job{ @@ -156,13 +161,15 @@ func testJobGetAllByTypeAndStatus(t *testing.T, ss store.Store) { defer ss.Job().Delete(job.Id) } - received, err := ss.Job().GetAllByTypeAndStatus(jobType, model.JobStatusPending) + received, err := ss.Job().GetAllByTypeAndStatus(ctx, jobType, model.JobStatusPending) require.NoError(t, err) require.Len(t, received, 2) require.ElementsMatch(t, []string{jobs[0].Id, jobs[1].Id}, []string{received[0].Id, received[1].Id}) } func testJobGetAllByTypePage(t *testing.T, ss store.Store) { + ctx := request.EmptyContext(mlog.CreateConsoleTestLogger(t)) + jobType := model.NewId() jobs := []*model.Job{ @@ -194,19 +201,21 @@ func 
testJobGetAllByTypePage(t *testing.T, ss store.Store) { defer ss.Job().Delete(job.Id) } - received, err := ss.Job().GetAllByTypePage(jobType, 0, 2) + received, err := ss.Job().GetAllByTypePage(ctx, jobType, 0, 2) require.NoError(t, err) require.Len(t, received, 2) require.Equal(t, received[0].Id, jobs[2].Id, "should've received newest job first") require.Equal(t, received[1].Id, jobs[0].Id, "should've received second newest job second") - received, err = ss.Job().GetAllByTypePage(jobType, 2, 2) + received, err = ss.Job().GetAllByTypePage(ctx, jobType, 2, 2) require.NoError(t, err) require.Len(t, received, 1) require.Equal(t, received[0].Id, jobs[1].Id, "should've received oldest job last") } func testJobGetAllByTypesPage(t *testing.T, ss store.Store) { + ctx := request.EmptyContext(mlog.CreateConsoleTestLogger(t)) + jobType := model.NewId() jobType2 := model.NewId() @@ -241,7 +250,7 @@ func testJobGetAllByTypesPage(t *testing.T, ss store.Store) { // test return all jobTypes := []string{jobType, jobType2} - received, err := ss.Job().GetAllByTypesPage(jobTypes, 0, 4) + received, err := ss.Job().GetAllByTypesPage(ctx, jobTypes, 0, 4) require.NoError(t, err) require.Len(t, received, 3) require.Equal(t, received[0].Id, jobs[2].Id, "should've received newest job first") @@ -249,59 +258,21 @@ func testJobGetAllByTypesPage(t *testing.T, ss store.Store) { // test paging jobTypes = []string{jobType, jobType2} - received, err = ss.Job().GetAllByTypesPage(jobTypes, 0, 2) + received, err = ss.Job().GetAllByTypesPage(ctx, jobTypes, 0, 2) require.NoError(t, err) require.Len(t, received, 2) require.Equal(t, received[0].Id, jobs[2].Id, "should've received newest job first") require.Equal(t, received[1].Id, jobs[0].Id, "should've received second newest job second") - received, err = ss.Job().GetAllByTypesPage(jobTypes, 2, 2) + received, err = ss.Job().GetAllByTypesPage(ctx, jobTypes, 2, 2) require.NoError(t, err) require.Len(t, received, 1) require.Equal(t, received[0].Id, jobs[1].Id, "should've received oldest job last") } -func testJobGetAllPage(t *testing.T, ss store.Store) { - jobType := model.NewId() - createAtTime := model.GetMillis() - - jobs := []*model.Job{ - { - Id: model.NewId(), - Type: jobType, - CreateAt: createAtTime + 1, - }, - { - Id: model.NewId(), - Type: jobType, - CreateAt: createAtTime, - }, - { - Id: model.NewId(), - Type: jobType, - CreateAt: createAtTime + 2, - }, - } - - for _, job := range jobs { - _, err := ss.Job().Save(job) - require.NoError(t, err) - defer ss.Job().Delete(job.Id) - } - - received, err := ss.Job().GetAllPage(0, 2) - require.NoError(t, err) - require.Len(t, received, 2) - require.Equal(t, received[0].Id, jobs[2].Id, "should've received newest job first") - require.Equal(t, received[1].Id, jobs[0].Id, "should've received second newest job second") - - received, err = ss.Job().GetAllPage(2, 2) - require.NoError(t, err) - require.NotEmpty(t, received) - require.Equal(t, received[0].Id, jobs[1].Id, "should've received oldest job last") -} - func testJobGetAllByStatus(t *testing.T, ss store.Store) { + ctx := request.EmptyContext(mlog.CreateConsoleTestLogger(t)) + jobType := model.NewId() status := model.NewId() @@ -341,7 +312,7 @@ func testJobGetAllByStatus(t *testing.T, ss store.Store) { defer ss.Job().Delete(job.Id) } - received, err := ss.Job().GetAllByStatus(status) + received, err := ss.Job().GetAllByStatus(ctx, status) require.NoError(t, err) require.Len(t, received, 3) require.Equal(t, received[0].Id, jobs[1].Id) @@ -521,6 +492,8 @@ func 
testJobStoreGetCountByStatusAndType(t *testing.T, ss store.Store) { } func testJobUpdateOptimistically(t *testing.T, ss store.Store) { + ctx := request.EmptyContext(mlog.CreateConsoleTestLogger(t)) + job := &model.Job{ Id: model.NewId(), Type: model.JobTypeDataRetention, @@ -548,7 +521,7 @@ func testJobUpdateOptimistically(t *testing.T, ss store.Store) { require.NoError(t, err) require.True(t, updated) - updatedJob, err := ss.Job().Get(job.Id) + updatedJob, err := ss.Job().Get(ctx, job.Id) require.NoError(t, err) require.Equal(t, updatedJob.Type, job.Type) @@ -560,6 +533,7 @@ func testJobUpdateOptimistically(t *testing.T, ss store.Store) { } func testJobUpdateStatusUpdateStatusOptimistically(t *testing.T, ss store.Store) { + ctx := request.EmptyContext(mlog.CreateConsoleTestLogger(t)) job := &model.Job{ Id: model.NewId(), Type: model.JobTypeDataRetention, @@ -589,7 +563,7 @@ func testJobUpdateStatusUpdateStatusOptimistically(t *testing.T, ss store.Store) require.NoError(t, err) require.False(t, updated) - received, err = ss.Job().Get(job.Id) + received, err = ss.Job().Get(ctx, job.Id) require.NoError(t, err) require.Equal(t, model.JobStatusPending, received.Status) @@ -602,7 +576,7 @@ func testJobUpdateStatusUpdateStatusOptimistically(t *testing.T, ss store.Store) require.True(t, updated, "should have succeeded") var startAtSet int64 - received, err = ss.Job().Get(job.Id) + received, err = ss.Job().Get(ctx, job.Id) require.NoError(t, err) require.Equal(t, model.JobStatusInProgress, received.Status) require.NotEqual(t, 0, received.StartAt) @@ -616,7 +590,7 @@ func testJobUpdateStatusUpdateStatusOptimistically(t *testing.T, ss store.Store) require.NoError(t, err) require.True(t, updated, "should have succeeded") - received, err = ss.Job().Get(job.Id) + received, err = ss.Job().Get(ctx, job.Id) require.NoError(t, err) require.Equal(t, model.JobStatusSuccess, received.Status) require.Equal(t, startAtSet, received.StartAt) @@ -632,6 +606,8 @@ func testJobDelete(t *testing.T, ss store.Store) { } func testJobCleanup(t *testing.T, ss store.Store) { + ctx := request.EmptyContext(mlog.CreateConsoleTestLogger(t)) + now := model.GetMillis() ids := make([]string, 0, 10) for i := 0; i < 10; i++ { @@ -645,7 +621,7 @@ func testJobCleanup(t *testing.T, ss store.Store) { defer ss.Job().Delete(job.Id) } - jobs, err := ss.Job().GetAllByStatus(model.JobStatusPending) + jobs, err := ss.Job().GetAllByStatus(ctx, model.JobStatusPending) require.NoError(t, err) assert.Len(t, jobs, 10) @@ -653,7 +629,7 @@ func testJobCleanup(t *testing.T, ss store.Store) { require.NoError(t, err) // Should not clean up pending jobs - jobs, err = ss.Job().GetAllByStatus(model.JobStatusPending) + jobs, err = ss.Job().GetAllByStatus(ctx, model.JobStatusPending) require.NoError(t, err) assert.Len(t, jobs, 10) @@ -666,7 +642,7 @@ func testJobCleanup(t *testing.T, ss store.Store) { require.NoError(t, err) // Should clean up now - jobs, err = ss.Job().GetAllByStatus(model.JobStatusSuccess) + jobs, err = ss.Job().GetAllByStatus(ctx, model.JobStatusSuccess) require.NoError(t, err) assert.Len(t, jobs, 0) } diff --git a/server/channels/store/storetest/mocks/JobStore.go b/server/channels/store/storetest/mocks/JobStore.go index c008f34339..bd3e3e1745 100644 --- a/server/channels/store/storetest/mocks/JobStore.go +++ b/server/channels/store/storetest/mocks/JobStore.go @@ -6,6 +6,7 @@ package mocks import ( model "github.com/mattermost/mattermost/server/public/model" + request "github.com/mattermost/mattermost/server/public/shared/request" mock 
"github.com/stretchr/testify/mock" ) @@ -52,25 +53,25 @@ func (_m *JobStore) Delete(id string) (string, error) { return r0, r1 } -// Get provides a mock function with given fields: id -func (_m *JobStore) Get(id string) (*model.Job, error) { - ret := _m.Called(id) +// Get provides a mock function with given fields: c, id +func (_m *JobStore) Get(c *request.Context, id string) (*model.Job, error) { + ret := _m.Called(c, id) var r0 *model.Job var r1 error - if rf, ok := ret.Get(0).(func(string) (*model.Job, error)); ok { - return rf(id) + if rf, ok := ret.Get(0).(func(*request.Context, string) (*model.Job, error)); ok { + return rf(c, id) } - if rf, ok := ret.Get(0).(func(string) *model.Job); ok { - r0 = rf(id) + if rf, ok := ret.Get(0).(func(*request.Context, string) *model.Job); ok { + r0 = rf(c, id) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*model.Job) } } - if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(id) + if rf, ok := ret.Get(1).(func(*request.Context, string) error); ok { + r1 = rf(c, id) } else { r1 = ret.Error(1) } @@ -78,25 +79,25 @@ func (_m *JobStore) Get(id string) (*model.Job, error) { return r0, r1 } -// GetAllByStatus provides a mock function with given fields: status -func (_m *JobStore) GetAllByStatus(status string) ([]*model.Job, error) { - ret := _m.Called(status) +// GetAllByStatus provides a mock function with given fields: c, status +func (_m *JobStore) GetAllByStatus(c *request.Context, status string) ([]*model.Job, error) { + ret := _m.Called(c, status) var r0 []*model.Job var r1 error - if rf, ok := ret.Get(0).(func(string) ([]*model.Job, error)); ok { - return rf(status) + if rf, ok := ret.Get(0).(func(*request.Context, string) ([]*model.Job, error)); ok { + return rf(c, status) } - if rf, ok := ret.Get(0).(func(string) []*model.Job); ok { - r0 = rf(status) + if rf, ok := ret.Get(0).(func(*request.Context, string) []*model.Job); ok { + r0 = rf(c, status) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*model.Job) } } - if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(status) + if rf, ok := ret.Get(1).(func(*request.Context, string) error); ok { + r1 = rf(c, status) } else { r1 = ret.Error(1) } @@ -104,25 +105,25 @@ func (_m *JobStore) GetAllByStatus(status string) ([]*model.Job, error) { return r0, r1 } -// GetAllByType provides a mock function with given fields: jobType -func (_m *JobStore) GetAllByType(jobType string) ([]*model.Job, error) { - ret := _m.Called(jobType) +// GetAllByType provides a mock function with given fields: c, jobType +func (_m *JobStore) GetAllByType(c *request.Context, jobType string) ([]*model.Job, error) { + ret := _m.Called(c, jobType) var r0 []*model.Job var r1 error - if rf, ok := ret.Get(0).(func(string) ([]*model.Job, error)); ok { - return rf(jobType) + if rf, ok := ret.Get(0).(func(*request.Context, string) ([]*model.Job, error)); ok { + return rf(c, jobType) } - if rf, ok := ret.Get(0).(func(string) []*model.Job); ok { - r0 = rf(jobType) + if rf, ok := ret.Get(0).(func(*request.Context, string) []*model.Job); ok { + r0 = rf(c, jobType) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*model.Job) } } - if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(jobType) + if rf, ok := ret.Get(1).(func(*request.Context, string) error); ok { + r1 = rf(c, jobType) } else { r1 = ret.Error(1) } @@ -130,25 +131,25 @@ func (_m *JobStore) GetAllByType(jobType string) ([]*model.Job, error) { return r0, r1 } -// GetAllByTypeAndStatus provides a mock function with given fields: jobType, status -func 
(_m *JobStore) GetAllByTypeAndStatus(jobType string, status string) ([]*model.Job, error) { - ret := _m.Called(jobType, status) +// GetAllByTypeAndStatus provides a mock function with given fields: c, jobType, status +func (_m *JobStore) GetAllByTypeAndStatus(c *request.Context, jobType string, status string) ([]*model.Job, error) { + ret := _m.Called(c, jobType, status) var r0 []*model.Job var r1 error - if rf, ok := ret.Get(0).(func(string, string) ([]*model.Job, error)); ok { - return rf(jobType, status) + if rf, ok := ret.Get(0).(func(*request.Context, string, string) ([]*model.Job, error)); ok { + return rf(c, jobType, status) } - if rf, ok := ret.Get(0).(func(string, string) []*model.Job); ok { - r0 = rf(jobType, status) + if rf, ok := ret.Get(0).(func(*request.Context, string, string) []*model.Job); ok { + r0 = rf(c, jobType, status) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*model.Job) } } - if rf, ok := ret.Get(1).(func(string, string) error); ok { - r1 = rf(jobType, status) + if rf, ok := ret.Get(1).(func(*request.Context, string, string) error); ok { + r1 = rf(c, jobType, status) } else { r1 = ret.Error(1) } @@ -156,25 +157,25 @@ func (_m *JobStore) GetAllByTypeAndStatus(jobType string, status string) ([]*mod return r0, r1 } -// GetAllByTypePage provides a mock function with given fields: jobType, offset, limit -func (_m *JobStore) GetAllByTypePage(jobType string, offset int, limit int) ([]*model.Job, error) { - ret := _m.Called(jobType, offset, limit) +// GetAllByTypePage provides a mock function with given fields: c, jobType, offset, limit +func (_m *JobStore) GetAllByTypePage(c *request.Context, jobType string, offset int, limit int) ([]*model.Job, error) { + ret := _m.Called(c, jobType, offset, limit) var r0 []*model.Job var r1 error - if rf, ok := ret.Get(0).(func(string, int, int) ([]*model.Job, error)); ok { - return rf(jobType, offset, limit) + if rf, ok := ret.Get(0).(func(*request.Context, string, int, int) ([]*model.Job, error)); ok { + return rf(c, jobType, offset, limit) } - if rf, ok := ret.Get(0).(func(string, int, int) []*model.Job); ok { - r0 = rf(jobType, offset, limit) + if rf, ok := ret.Get(0).(func(*request.Context, string, int, int) []*model.Job); ok { + r0 = rf(c, jobType, offset, limit) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*model.Job) } } - if rf, ok := ret.Get(1).(func(string, int, int) error); ok { - r1 = rf(jobType, offset, limit) + if rf, ok := ret.Get(1).(func(*request.Context, string, int, int) error); ok { + r1 = rf(c, jobType, offset, limit) } else { r1 = ret.Error(1) } @@ -182,51 +183,25 @@ func (_m *JobStore) GetAllByTypePage(jobType string, offset int, limit int) ([]* return r0, r1 } -// GetAllByTypesPage provides a mock function with given fields: jobTypes, offset, limit -func (_m *JobStore) GetAllByTypesPage(jobTypes []string, offset int, limit int) ([]*model.Job, error) { - ret := _m.Called(jobTypes, offset, limit) +// GetAllByTypesPage provides a mock function with given fields: c, jobTypes, offset, limit +func (_m *JobStore) GetAllByTypesPage(c *request.Context, jobTypes []string, offset int, limit int) ([]*model.Job, error) { + ret := _m.Called(c, jobTypes, offset, limit) var r0 []*model.Job var r1 error - if rf, ok := ret.Get(0).(func([]string, int, int) ([]*model.Job, error)); ok { - return rf(jobTypes, offset, limit) + if rf, ok := ret.Get(0).(func(*request.Context, []string, int, int) ([]*model.Job, error)); ok { + return rf(c, jobTypes, offset, limit) } - if rf, ok := ret.Get(0).(func([]string, int, int) 
[]*model.Job); ok { - r0 = rf(jobTypes, offset, limit) + if rf, ok := ret.Get(0).(func(*request.Context, []string, int, int) []*model.Job); ok { + r0 = rf(c, jobTypes, offset, limit) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*model.Job) } } - if rf, ok := ret.Get(1).(func([]string, int, int) error); ok { - r1 = rf(jobTypes, offset, limit) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetAllPage provides a mock function with given fields: offset, limit -func (_m *JobStore) GetAllPage(offset int, limit int) ([]*model.Job, error) { - ret := _m.Called(offset, limit) - - var r0 []*model.Job - var r1 error - if rf, ok := ret.Get(0).(func(int, int) ([]*model.Job, error)); ok { - return rf(offset, limit) - } - if rf, ok := ret.Get(0).(func(int, int) []*model.Job); ok { - r0 = rf(offset, limit) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*model.Job) - } - } - - if rf, ok := ret.Get(1).(func(int, int) error); ok { - r1 = rf(offset, limit) + if rf, ok := ret.Get(1).(func(*request.Context, []string, int, int) error); ok { + r1 = rf(c, jobTypes, offset, limit) } else { r1 = ret.Error(1) } diff --git a/server/channels/store/timerlayer/timerlayer.go b/server/channels/store/timerlayer/timerlayer.go index bd17c1dfe4..de74f5ce00 100644 --- a/server/channels/store/timerlayer/timerlayer.go +++ b/server/channels/store/timerlayer/timerlayer.go @@ -11,6 +11,7 @@ import ( "time" "github.com/mattermost/mattermost/server/public/model" + "github.com/mattermost/mattermost/server/public/shared/request" "github.com/mattermost/mattermost/server/v8/channels/store" "github.com/mattermost/mattermost/server/v8/einterfaces" ) @@ -4480,10 +4481,10 @@ func (s *TimerLayerJobStore) Delete(id string) (string, error) { return result, err } -func (s *TimerLayerJobStore) Get(id string) (*model.Job, error) { +func (s *TimerLayerJobStore) Get(c *request.Context, id string) (*model.Job, error) { start := time.Now() - result, err := s.JobStore.Get(id) + result, err := s.JobStore.Get(c, id) elapsed := float64(time.Since(start)) / float64(time.Second) if s.Root.Metrics != nil { @@ -4496,10 +4497,10 @@ func (s *TimerLayerJobStore) Get(id string) (*model.Job, error) { return result, err } -func (s *TimerLayerJobStore) GetAllByStatus(status string) ([]*model.Job, error) { +func (s *TimerLayerJobStore) GetAllByStatus(c *request.Context, status string) ([]*model.Job, error) { start := time.Now() - result, err := s.JobStore.GetAllByStatus(status) + result, err := s.JobStore.GetAllByStatus(c, status) elapsed := float64(time.Since(start)) / float64(time.Second) if s.Root.Metrics != nil { @@ -4512,10 +4513,10 @@ func (s *TimerLayerJobStore) GetAllByStatus(status string) ([]*model.Job, error) return result, err } -func (s *TimerLayerJobStore) GetAllByType(jobType string) ([]*model.Job, error) { +func (s *TimerLayerJobStore) GetAllByType(c *request.Context, jobType string) ([]*model.Job, error) { start := time.Now() - result, err := s.JobStore.GetAllByType(jobType) + result, err := s.JobStore.GetAllByType(c, jobType) elapsed := float64(time.Since(start)) / float64(time.Second) if s.Root.Metrics != nil { @@ -4528,10 +4529,10 @@ func (s *TimerLayerJobStore) GetAllByType(jobType string) ([]*model.Job, error) return result, err } -func (s *TimerLayerJobStore) GetAllByTypeAndStatus(jobType string, status string) ([]*model.Job, error) { +func (s *TimerLayerJobStore) GetAllByTypeAndStatus(c *request.Context, jobType string, status string) ([]*model.Job, error) { start := time.Now() - result, err := 
s.JobStore.GetAllByTypeAndStatus(jobType, status) + result, err := s.JobStore.GetAllByTypeAndStatus(c, jobType, status) elapsed := float64(time.Since(start)) / float64(time.Second) if s.Root.Metrics != nil { @@ -4544,10 +4545,10 @@ func (s *TimerLayerJobStore) GetAllByTypeAndStatus(jobType string, status string return result, err } -func (s *TimerLayerJobStore) GetAllByTypePage(jobType string, offset int, limit int) ([]*model.Job, error) { +func (s *TimerLayerJobStore) GetAllByTypePage(c *request.Context, jobType string, offset int, limit int) ([]*model.Job, error) { start := time.Now() - result, err := s.JobStore.GetAllByTypePage(jobType, offset, limit) + result, err := s.JobStore.GetAllByTypePage(c, jobType, offset, limit) elapsed := float64(time.Since(start)) / float64(time.Second) if s.Root.Metrics != nil { @@ -4560,10 +4561,10 @@ func (s *TimerLayerJobStore) GetAllByTypePage(jobType string, offset int, limit return result, err } -func (s *TimerLayerJobStore) GetAllByTypesPage(jobTypes []string, offset int, limit int) ([]*model.Job, error) { +func (s *TimerLayerJobStore) GetAllByTypesPage(c *request.Context, jobTypes []string, offset int, limit int) ([]*model.Job, error) { start := time.Now() - result, err := s.JobStore.GetAllByTypesPage(jobTypes, offset, limit) + result, err := s.JobStore.GetAllByTypesPage(c, jobTypes, offset, limit) elapsed := float64(time.Since(start)) / float64(time.Second) if s.Root.Metrics != nil { @@ -4576,22 +4577,6 @@ func (s *TimerLayerJobStore) GetAllByTypesPage(jobTypes []string, offset int, li return result, err } -func (s *TimerLayerJobStore) GetAllPage(offset int, limit int) ([]*model.Job, error) { - start := time.Now() - - result, err := s.JobStore.GetAllPage(offset, limit) - - elapsed := float64(time.Since(start)) / float64(time.Second) - if s.Root.Metrics != nil { - success := "false" - if err == nil { - success = "true" - } - s.Root.Metrics.ObserveStoreMethodDuration("JobStore.GetAllPage", success, elapsed) - } - return result, err -} - func (s *TimerLayerJobStore) GetCountByStatusAndType(status string, jobType string) (int64, error) { start := time.Now() diff --git a/server/channels/web/handlers.go b/server/channels/web/handlers.go index 71a3688532..9a8902e649 100644 --- a/server/channels/web/handlers.go +++ b/server/channels/web/handlers.go @@ -170,14 +170,17 @@ func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } t, _ := i18n.GetTranslationsAndLocaleFromRequest(r) - c.AppContext.SetT(t) - c.AppContext.SetRequestId(requestID) - c.AppContext.SetIPAddress(utils.GetIPAddress(r, c.App.Config().ServiceSettings.TrustedProxyIPHeader)) - c.AppContext.SetXForwardedFor(r.Header.Get("X-Forwarded-For")) - c.AppContext.SetUserAgent(r.UserAgent()) - c.AppContext.SetAcceptLanguage(r.Header.Get("Accept-Language")) - c.AppContext.SetPath(r.URL.Path) - c.AppContext.SetContext(context.Background()) + c.AppContext = request.NewContext( + context.Background(), + requestID, + utils.GetIPAddress(r, c.App.Config().ServiceSettings.TrustedProxyIPHeader), + r.Header.Get("X-Forwarded-For"), + r.URL.Path, + r.UserAgent(), + r.Header.Get("Accept-Language"), + t, + ) + c.Params = ParamsFromRequest(r) c.Logger = c.App.Log() diff --git a/server/channels/web/handlers_test.go b/server/channels/web/handlers_test.go index e8a2254b75..cb9fa99eec 100644 --- a/server/channels/web/handlers_test.go +++ b/server/channels/web/handlers_test.go @@ -558,7 +558,7 @@ func TestGenerateDevCSP(t *testing.T) { *cfg.ServiceSettings.DeveloperFlags = "" }) - logger := 
mlog.CreateConsoleTestLogger(false, mlog.LvlWarn) + logger := mlog.CreateConsoleTestLogger(t) buf := &mlog.Buffer{} require.NoError(t, mlog.AddWriterTarget(logger, buf, false, mlog.LvlWarn)) @@ -570,8 +570,6 @@ func TestGenerateDevCSP(t *testing.T) { generateDevCSP(*c) - require.NoError(t, logger.Shutdown()) - assert.Equal(t, "", buf.String()) }) } diff --git a/server/channels/web/oauth_test.go b/server/channels/web/oauth_test.go index b90c8f6a44..2efd54370a 100644 --- a/server/channels/web/oauth_test.go +++ b/server/channels/web/oauth_test.go @@ -608,8 +608,7 @@ func TestOAuthComplete_ErrorMessages(t *testing.T) { translationFunc := i18n.GetUserTranslations("en") c.AppContext.SetT(translationFunc) - c.Logger = mlog.CreateConsoleTestLogger(true, mlog.LvlDebug) - defer c.Logger.Shutdown() + c.Logger = mlog.CreateConsoleTestLogger(t) th.App.UpdateConfig(func(cfg *model.Config) { *cfg.GitLabSettings.Enable = true }) th.App.UpdateConfig(func(cfg *model.Config) { *cfg.ServiceSettings.EnableOAuthServiceProvider = true }) provider := &MattermostTestProvider{} diff --git a/server/cmd/mattermost/commands/export.go b/server/cmd/mattermost/commands/export.go index 0dcceaa4c2..816e091c87 100644 --- a/server/cmd/mattermost/commands/export.go +++ b/server/cmd/mattermost/commands/export.go @@ -138,7 +138,10 @@ func scheduleExportCmdF(command *cobra.Command, args []string) error { defer cancel() } - job, err := messageExportI.StartSynchronizeJob(ctx, startTime) + c := request.EmptyContext(a.Log()) + c.SetContext(ctx) + + job, err := messageExportI.StartSynchronizeJob(c, startTime) if err != nil || job.Status == model.JobStatusError || job.Status == model.JobStatusCanceled { CommandPrintErrorln("ERROR: Message export job failed. Please check the server logs") } else { diff --git a/server/cmd/mmctl/commands/export_e2e_test.go b/server/cmd/mmctl/commands/export_e2e_test.go index 55ea15c95a..4257ecc04d 100644 --- a/server/cmd/mmctl/commands/export_e2e_test.go +++ b/server/cmd/mmctl/commands/export_e2e_test.go @@ -10,6 +10,7 @@ import ( "path/filepath" "time" + "github.com/mattermost/mattermost/server/public/shared/request" "github.com/mattermost/mattermost/server/v8/cmd/mmctl/client" "github.com/mattermost/mattermost/server/v8/cmd/mmctl/printer" @@ -275,16 +276,20 @@ func (s *MmctlE2ETestSuite) TestExportDownloadCmdF() { func (s *MmctlE2ETestSuite) TestExportJobShowCmdF() { s.SetupTestHelper().InitBasic() + ctx := request.EmptyContext(s.th.App.Log()) - job, appErr := s.th.App.CreateJob(&model.Job{ + job, appErr := s.th.App.CreateJob(ctx, &model.Job{ Type: model.JobTypeExportProcess, }) s.Require().Nil(appErr) + job.Logger = nil + + time.Sleep(time.Millisecond) s.Run("MM-T3885 - no permissions", func() { printer.Clean() - job1, appErr := s.th.App.CreateJob(&model.Job{ + job1, appErr := s.th.App.CreateJob(ctx, &model.Job{ Type: model.JobTypeExportProcess, }) s.Require().Nil(appErr) @@ -317,6 +322,7 @@ func (s *MmctlE2ETestSuite) TestExportJobShowCmdF() { func (s *MmctlE2ETestSuite) TestExportJobListCmdF() { s.SetupTestHelper().InitBasic() + ctx := request.EmptyContext(s.th.App.Log()) s.Run("MM-T3887 - no permissions", func() { printer.Clean() @@ -356,24 +362,26 @@ func (s *MmctlE2ETestSuite) TestExportJobListCmdF() { cmd.Flags().Int("per-page", perPage, "") cmd.Flags().Bool("all", false, "") - _, appErr := s.th.App.CreateJob(&model.Job{ + _, appErr := s.th.App.CreateJob(ctx, &model.Job{ Type: model.JobTypeExportProcess, }) s.Require().Nil(appErr) time.Sleep(time.Millisecond) - job2, appErr := 
s.th.App.CreateJob(&model.Job{ + job2, appErr := s.th.App.CreateJob(ctx, &model.Job{ Type: model.JobTypeExportProcess, }) s.Require().Nil(appErr) + job2.Logger = nil time.Sleep(time.Millisecond) - job3, appErr := s.th.App.CreateJob(&model.Job{ + job3, appErr := s.th.App.CreateJob(ctx, &model.Job{ Type: model.JobTypeExportProcess, }) s.Require().Nil(appErr) + job3.Logger = nil err := exportJobListCmdF(c, cmd, nil) s.Require().Nil(err) @@ -386,13 +394,14 @@ func (s *MmctlE2ETestSuite) TestExportJobListCmdF() { func (s *MmctlE2ETestSuite) TestExportJobCancelCmdF() { s.SetupTestHelper().InitBasic() + ctx := request.EmptyContext(s.th.App.Log()) s.Run("Cancel an export job without permissions", func() { printer.Clean() cmd := &cobra.Command{} - job, appErr := s.th.App.CreateJob(&model.Job{ + job, appErr := s.th.App.CreateJob(ctx, &model.Job{ Type: model.JobTypeExportProcess, }) s.Require().Nil(appErr) @@ -421,14 +430,14 @@ func (s *MmctlE2ETestSuite) TestExportJobCancelCmdF() { cmd := &cobra.Command{} - job1, appErr := s.th.App.CreateJob(&model.Job{ + job1, appErr := s.th.App.CreateJob(ctx, &model.Job{ Type: model.JobTypeExportProcess, }) s.Require().Nil(appErr) time.Sleep(time.Millisecond) - job2, appErr := s.th.App.CreateJob(&model.Job{ + job2, appErr := s.th.App.CreateJob(ctx, &model.Job{ Type: model.JobTypeExportProcess, }) s.Require().Nil(appErr) @@ -439,11 +448,11 @@ func (s *MmctlE2ETestSuite) TestExportJobCancelCmdF() { s.Require().Empty(printer.GetErrorLines()) // Get job1 again to refresh its status - job1, appErr = s.th.App.GetJob(job1.Id) + job1, appErr = s.th.App.GetJob(ctx, job1.Id) s.Require().Nil(appErr) // Get job2 again to ensure its status did not change - job2, _ = s.th.App.GetJob(job2.Id) + job2, _ = s.th.App.GetJob(ctx, job2.Id) s.Require().Nil(appErr) s.Require().Equal(job1.Status, model.JobStatusCanceled) diff --git a/server/cmd/mmctl/commands/extract_e2e_test.go b/server/cmd/mmctl/commands/extract_e2e_test.go index 65da74eabc..2b69b16a4e 100644 --- a/server/cmd/mmctl/commands/extract_e2e_test.go +++ b/server/cmd/mmctl/commands/extract_e2e_test.go @@ -9,6 +9,7 @@ import ( "path/filepath" "time" + "github.com/mattermost/mattermost/server/public/shared/request" "github.com/mattermost/mattermost/server/v8/cmd/mmctl/client" "github.com/mattermost/mattermost/server/v8/cmd/mmctl/printer" @@ -70,17 +71,19 @@ func (s *MmctlE2ETestSuite) TestExtractRunCmdF() { func (s *MmctlE2ETestSuite) TestExtractJobShowCmdF() { s.SetupTestHelper().InitBasic() + ctx := request.EmptyContext(s.th.App.Log()) - job, appErr := s.th.App.CreateJob(&model.Job{ + job, appErr := s.th.App.CreateJob(ctx, &model.Job{ Type: model.JobTypeExtractContent, Data: map[string]string{}, }) s.Require().Nil(appErr) + job.Logger = nil s.Run("no permissions", func() { printer.Clean() - job1, appErr := s.th.App.CreateJob(&model.Job{ + job1, appErr := s.th.App.CreateJob(ctx, &model.Job{ Type: model.JobTypeExtractContent, Data: map[string]string{}, }) @@ -116,6 +119,7 @@ func (s *MmctlE2ETestSuite) TestExtractJobShowCmdF() { func (s *MmctlE2ETestSuite) TestExtractJobListCmdF() { s.SetupTestHelper().InitBasic() + ctx := request.EmptyContext(s.th.App.Log()) s.Run("no permissions", func() { printer.Clean() @@ -156,7 +160,7 @@ func (s *MmctlE2ETestSuite) TestExtractJobListCmdF() { cmd.Flags().Int("per-page", perPage, "") cmd.Flags().Bool("all", false, "") - _, appErr := s.th.App.CreateJob(&model.Job{ + _, appErr := s.th.App.CreateJob(ctx, &model.Job{ Type: model.JobTypeExtractContent, Data: map[string]string{}, }) @@ -164,19 
+168,23 @@ func (s *MmctlE2ETestSuite) TestExtractJobListCmdF() { time.Sleep(time.Millisecond) - job2, appErr := s.th.App.CreateJob(&model.Job{ + job2, appErr := s.th.App.CreateJob(ctx, &model.Job{ Type: model.JobTypeExtractContent, Data: map[string]string{}, }) s.Require().Nil(appErr) + job2.Logger = nil time.Sleep(time.Millisecond) - job3, appErr := s.th.App.CreateJob(&model.Job{ + job3, appErr := s.th.App.CreateJob(ctx, &model.Job{ Type: model.JobTypeExtractContent, Data: map[string]string{}, }) s.Require().Nil(appErr) + job3.Logger = nil + + time.Sleep(time.Millisecond) err := extractJobListCmdF(c, cmd, nil) s.Require().Nil(err) diff --git a/server/cmd/mmctl/commands/import_e2e_test.go b/server/cmd/mmctl/commands/import_e2e_test.go index 7eff0621f3..428e1f645c 100644 --- a/server/cmd/mmctl/commands/import_e2e_test.go +++ b/server/cmd/mmctl/commands/import_e2e_test.go @@ -9,6 +9,7 @@ import ( "path/filepath" "time" + "github.com/mattermost/mattermost/server/public/shared/request" "github.com/mattermost/mattermost/server/v8/cmd/mmctl/client" "github.com/mattermost/mattermost/server/v8/cmd/mmctl/printer" @@ -250,17 +251,19 @@ func (s *MmctlE2ETestSuite) TestImportListIncompleteCmdF() { func (s *MmctlE2ETestSuite) TestImportJobShowCmdF() { s.SetupTestHelper().InitBasic() + ctx := request.EmptyContext(s.th.App.Log()) - job, appErr := s.th.App.CreateJob(&model.Job{ + job, appErr := s.th.App.CreateJob(ctx, &model.Job{ Type: model.JobTypeImportProcess, Data: map[string]string{"import_file": "import1.zip"}, }) s.Require().Nil(appErr) + job.Logger = nil s.Run("no permissions", func() { printer.Clean() - job1, appErr := s.th.App.CreateJob(&model.Job{ + job1, appErr := s.th.App.CreateJob(ctx, &model.Job{ Type: model.JobTypeImportProcess, Data: map[string]string{"import_file": "import1.zip"}, }) @@ -296,6 +299,7 @@ func (s *MmctlE2ETestSuite) TestImportJobShowCmdF() { func (s *MmctlE2ETestSuite) TestImportJobListCmdF() { s.SetupTestHelper().InitBasic() + ctx := request.EmptyContext(s.th.App.Log()) s.Run("no permissions", func() { printer.Clean() @@ -336,7 +340,7 @@ func (s *MmctlE2ETestSuite) TestImportJobListCmdF() { cmd.Flags().Int("per-page", perPage, "") cmd.Flags().Bool("all", false, "") - _, appErr := s.th.App.CreateJob(&model.Job{ + _, appErr := s.th.App.CreateJob(ctx, &model.Job{ Type: model.JobTypeImportProcess, Data: map[string]string{"import_file": "import1.zip"}, }) @@ -344,19 +348,21 @@ func (s *MmctlE2ETestSuite) TestImportJobListCmdF() { time.Sleep(time.Millisecond) - job2, appErr := s.th.App.CreateJob(&model.Job{ + job2, appErr := s.th.App.CreateJob(ctx, &model.Job{ Type: model.JobTypeImportProcess, Data: map[string]string{"import_file": "import2.zip"}, }) s.Require().Nil(appErr) + job2.Logger = nil time.Sleep(time.Millisecond) - job3, appErr := s.th.App.CreateJob(&model.Job{ + job3, appErr := s.th.App.CreateJob(ctx, &model.Job{ Type: model.JobTypeImportProcess, Data: map[string]string{"import_file": "import3.zip"}, }) s.Require().Nil(appErr) + job3.Logger = nil err := importJobListCmdF(c, cmd, nil) s.Require().Nil(err) diff --git a/server/cmd/mmctl/commands/ldap_e2e_test.go b/server/cmd/mmctl/commands/ldap_e2e_test.go index deccccd633..1c91338db7 100644 --- a/server/cmd/mmctl/commands/ldap_e2e_test.go +++ b/server/cmd/mmctl/commands/ldap_e2e_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/mattermost/mattermost/server/public/model" + "github.com/mattermost/mattermost/server/public/shared/request" "github.com/mattermost/mattermost/server/v8/channels/api4" 
"github.com/mattermost/mattermost/server/v8/channels/utils/testutils" "github.com/spf13/cobra" @@ -51,6 +52,7 @@ func configForLdap(th *api4.TestHelper) { func (s *MmctlE2ETestSuite) TestLdapSyncCmd() { s.SetupEnterpriseTestHelper().InitBasic() configForLdap(s.th) + ctx := request.EmptyContext(s.th.App.Log()) s.Run("MM-T3971 Should not allow regular user to sync LDAP groups", func() { printer.Clean() @@ -64,7 +66,7 @@ func (s *MmctlE2ETestSuite) TestLdapSyncCmd() { s.RunForSystemAdminAndLocal("MM-T2529 Should sync LDAP groups", func(c client.Client) { printer.Clean() - jobs, appErr := s.th.App.GetJobsByTypePage(model.JobTypeLdapSync, 0, 100) + jobs, appErr := s.th.App.GetJobsByTypePage(ctx, model.JobTypeLdapSync, 0, 100) s.Require().Nil(appErr) initialNumJobs := len(jobs) @@ -78,7 +80,7 @@ func (s *MmctlE2ETestSuite) TestLdapSyncCmd() { // we need to wait a bit for job creation time.Sleep(time.Second) - jobs, appErr = s.th.App.GetJobsByTypePage(model.JobTypeLdapSync, 0, 100) + jobs, appErr = s.th.App.GetJobsByTypePage(ctx, model.JobTypeLdapSync, 0, 100) s.Require().Nil(appErr) s.Require().NotEmpty(jobs) s.Assert().Equal(initialNumJobs+1, len(jobs)) diff --git a/server/einterfaces/account_migration.go b/server/einterfaces/account_migration.go index ccbf52df22..e142660b64 100644 --- a/server/einterfaces/account_migration.go +++ b/server/einterfaces/account_migration.go @@ -5,9 +5,10 @@ package einterfaces import ( "github.com/mattermost/mattermost/server/public/model" + "github.com/mattermost/mattermost/server/public/shared/request" ) type AccountMigrationInterface interface { - MigrateToLdap(fromAuthService string, foreignUserFieldNameToMatch string, force bool, dryRun bool) *model.AppError + MigrateToLdap(c *request.Context, fromAuthService string, foreignUserFieldNameToMatch string, force bool, dryRun bool) *model.AppError MigrateToSaml(fromAuthService string, usersMap map[string]string, auto bool, dryRun bool) *model.AppError } diff --git a/server/einterfaces/jobs/cloud_interface.go b/server/einterfaces/jobs/cloud_interface.go index 15214355cc..96e0d022dc 100644 --- a/server/einterfaces/jobs/cloud_interface.go +++ b/server/einterfaces/jobs/cloud_interface.go @@ -9,5 +9,5 @@ import ( type CloudJobInterface interface { MakeWorker() model.Worker - MakeScheduler() model.Scheduler + MakeScheduler() Scheduler } diff --git a/server/einterfaces/jobs/data_retention.go b/server/einterfaces/jobs/data_retention.go index 2a1e4b6a2a..266dd2df34 100644 --- a/server/einterfaces/jobs/data_retention.go +++ b/server/einterfaces/jobs/data_retention.go @@ -9,5 +9,5 @@ import ( type DataRetentionJobInterface interface { MakeWorker() model.Worker - MakeScheduler() model.Scheduler + MakeScheduler() Scheduler } diff --git a/server/einterfaces/jobs/elasticsearch.go b/server/einterfaces/jobs/elasticsearch.go index 892880579e..930452a289 100644 --- a/server/einterfaces/jobs/elasticsearch.go +++ b/server/einterfaces/jobs/elasticsearch.go @@ -13,7 +13,7 @@ type ElasticsearchIndexerInterface interface { type ElasticsearchAggregatorInterface interface { MakeWorker() model.Worker - MakeScheduler() model.Scheduler + MakeScheduler() Scheduler } type ElasticsearchFixChannelIndexInterface interface { diff --git a/server/einterfaces/jobs/ldap_sync.go b/server/einterfaces/jobs/ldap_sync.go index bc1b20c7fb..82b2986db1 100644 --- a/server/einterfaces/jobs/ldap_sync.go +++ b/server/einterfaces/jobs/ldap_sync.go @@ -9,5 +9,5 @@ import ( type LdapSyncInterface interface { MakeWorker() model.Worker - MakeScheduler() 
model.Scheduler + MakeScheduler() Scheduler } diff --git a/server/einterfaces/jobs/message_export.go b/server/einterfaces/jobs/message_export.go index b33d07585d..00c3d25f73 100644 --- a/server/einterfaces/jobs/message_export.go +++ b/server/einterfaces/jobs/message_export.go @@ -9,5 +9,5 @@ import ( type MessageExportJobInterface interface { MakeWorker() model.Worker - MakeScheduler() model.Scheduler + MakeScheduler() Scheduler } diff --git a/server/einterfaces/jobs/scheduler.go b/server/einterfaces/jobs/scheduler.go new file mode 100644 index 0000000000..feb7fbc612 --- /dev/null +++ b/server/einterfaces/jobs/scheduler.go @@ -0,0 +1,17 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package jobs + +import ( + "time" + + "github.com/mattermost/mattermost/server/public/model" + "github.com/mattermost/mattermost/server/public/shared/request" +) + +type Scheduler interface { + Enabled(cfg *model.Config) bool + NextScheduleTime(cfg *model.Config, now time.Time, pendingJobs bool, lastSuccessfulJob *model.Job) *time.Time + ScheduleJob(c *request.Context, cfg *model.Config, pendingJobs bool, lastSuccessfulJob *model.Job) (*model.Job, *model.AppError) +} diff --git a/server/einterfaces/ldap.go b/server/einterfaces/ldap.go index 92471a2a07..48f5e6bd59 100644 --- a/server/einterfaces/ldap.go +++ b/server/einterfaces/ldap.go @@ -16,7 +16,7 @@ type LdapInterface interface { CheckPasswordAuthData(authData string, password string) *model.AppError CheckProviderAttributes(LS *model.LdapSettings, ouser *model.User, patch *model.UserPatch) string SwitchToLdap(userID, ldapID, ldapPassword string) *model.AppError - StartSynchronizeJob(waitForJobToFinish bool, includeRemovedMembers bool) (*model.Job, *model.AppError) + StartSynchronizeJob(c *request.Context, waitForJobToFinish bool, includeRemovedMembers bool) (*model.Job, *model.AppError) RunTest() *model.AppError GetAllLdapUsers() ([]*model.User, *model.AppError) MigrateIDAttribute(toAttribute string) error diff --git a/server/einterfaces/message_export.go b/server/einterfaces/message_export.go index 9ee0f150f1..c341f50d27 100644 --- a/server/einterfaces/message_export.go +++ b/server/einterfaces/message_export.go @@ -4,12 +4,11 @@ package einterfaces import ( - "context" - "github.com/mattermost/mattermost/server/public/model" + "github.com/mattermost/mattermost/server/public/shared/request" ) type MessageExportInterface interface { - StartSynchronizeJob(ctx context.Context, exportFromTimestamp int64) (*model.Job, *model.AppError) + StartSynchronizeJob(c *request.Context, exportFromTimestamp int64) (*model.Job, *model.AppError) RunExport(format string, since int64, limit int) (int64, *model.AppError) } diff --git a/server/einterfaces/mocks/AccountMigrationInterface.go b/server/einterfaces/mocks/AccountMigrationInterface.go index 508aa1982b..c5f50a64fa 100644 --- a/server/einterfaces/mocks/AccountMigrationInterface.go +++ b/server/einterfaces/mocks/AccountMigrationInterface.go @@ -6,6 +6,7 @@ package mocks import ( model "github.com/mattermost/mattermost/server/public/model" + request "github.com/mattermost/mattermost/server/public/shared/request" mock "github.com/stretchr/testify/mock" ) @@ -14,13 +15,13 @@ type AccountMigrationInterface struct { mock.Mock } -// MigrateToLdap provides a mock function with given fields: fromAuthService, foreignUserFieldNameToMatch, force, dryRun -func (_m *AccountMigrationInterface) MigrateToLdap(fromAuthService string, foreignUserFieldNameToMatch 
string, force bool, dryRun bool) *model.AppError { - ret := _m.Called(fromAuthService, foreignUserFieldNameToMatch, force, dryRun) +// MigrateToLdap provides a mock function with given fields: c, fromAuthService, foreignUserFieldNameToMatch, force, dryRun +func (_m *AccountMigrationInterface) MigrateToLdap(c *request.Context, fromAuthService string, foreignUserFieldNameToMatch string, force bool, dryRun bool) *model.AppError { + ret := _m.Called(c, fromAuthService, foreignUserFieldNameToMatch, force, dryRun) var r0 *model.AppError - if rf, ok := ret.Get(0).(func(string, string, bool, bool) *model.AppError); ok { - r0 = rf(fromAuthService, foreignUserFieldNameToMatch, force, dryRun) + if rf, ok := ret.Get(0).(func(*request.Context, string, string, bool, bool) *model.AppError); ok { + r0 = rf(c, fromAuthService, foreignUserFieldNameToMatch, force, dryRun) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*model.AppError) diff --git a/server/einterfaces/mocks/AppContextInterface.go b/server/einterfaces/mocks/AppContextInterface.go deleted file mode 100644 index d97ff04891..0000000000 --- a/server/einterfaces/mocks/AppContextInterface.go +++ /dev/null @@ -1,118 +0,0 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. - -// Regenerate this file using `make einterfaces-mocks`. - -package mocks - -import ( - model "github.com/mattermost/mattermost/server/public/model" - mock "github.com/stretchr/testify/mock" -) - -// AppContextInterface is an autogenerated mock type for the AppContextInterface type -type AppContextInterface struct { - mock.Mock -} - -// AcceptLanguage provides a mock function with given fields: -func (_m *AppContextInterface) AcceptLanguage() string { - ret := _m.Called() - - var r0 string - if rf, ok := ret.Get(0).(func() string); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(string) - } - - return r0 -} - -// IPAddress provides a mock function with given fields: -func (_m *AppContextInterface) IPAddress() string { - ret := _m.Called() - - var r0 string - if rf, ok := ret.Get(0).(func() string); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(string) - } - - return r0 -} - -// Path provides a mock function with given fields: -func (_m *AppContextInterface) Path() string { - ret := _m.Called() - - var r0 string - if rf, ok := ret.Get(0).(func() string); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(string) - } - - return r0 -} - -// RequestId provides a mock function with given fields: -func (_m *AppContextInterface) RequestId() string { - ret := _m.Called() - - var r0 string - if rf, ok := ret.Get(0).(func() string); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(string) - } - - return r0 -} - -// Session provides a mock function with given fields: -func (_m *AppContextInterface) Session() *model.Session { - ret := _m.Called() - - var r0 *model.Session - if rf, ok := ret.Get(0).(func() *model.Session); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*model.Session) - } - } - - return r0 -} - -// T provides a mock function with given fields: translationID, args -func (_m *AppContextInterface) T(translationID string, args ...any) string { - var _ca []any - _ca = append(_ca, translationID) - _ca = append(_ca, args...) - ret := _m.Called(_ca...) - - var r0 string - if rf, ok := ret.Get(0).(func(string, ...any) string); ok { - r0 = rf(translationID, args...) 
- } else { - r0 = ret.Get(0).(string) - } - - return r0 -} - -// UserAgent provides a mock function with given fields: -func (_m *AppContextInterface) UserAgent() string { - ret := _m.Called() - - var r0 string - if rf, ok := ret.Get(0).(func() string); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(string) - } - - return r0 -} diff --git a/server/einterfaces/mocks/CloudJobInterface.go b/server/einterfaces/mocks/CloudJobInterface.go index 9a609ff637..9365c9095c 100644 --- a/server/einterfaces/mocks/CloudJobInterface.go +++ b/server/einterfaces/mocks/CloudJobInterface.go @@ -5,8 +5,10 @@ package mocks import ( - model "github.com/mattermost/mattermost/server/public/model" + jobs "github.com/mattermost/mattermost/server/v8/einterfaces/jobs" mock "github.com/stretchr/testify/mock" + + model "github.com/mattermost/mattermost/server/public/model" ) // CloudJobInterface is an autogenerated mock type for the CloudJobInterface type @@ -15,15 +17,15 @@ type CloudJobInterface struct { } // MakeScheduler provides a mock function with given fields: -func (_m *CloudJobInterface) MakeScheduler() model.Scheduler { +func (_m *CloudJobInterface) MakeScheduler() jobs.Scheduler { ret := _m.Called() - var r0 model.Scheduler - if rf, ok := ret.Get(0).(func() model.Scheduler); ok { + var r0 jobs.Scheduler + if rf, ok := ret.Get(0).(func() jobs.Scheduler); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(model.Scheduler) + r0 = ret.Get(0).(jobs.Scheduler) } } diff --git a/server/einterfaces/mocks/DataRetentionJobInterface.go b/server/einterfaces/mocks/DataRetentionJobInterface.go index ea4bd29a69..8b6d90107f 100644 --- a/server/einterfaces/mocks/DataRetentionJobInterface.go +++ b/server/einterfaces/mocks/DataRetentionJobInterface.go @@ -5,8 +5,10 @@ package mocks import ( - model "github.com/mattermost/mattermost/server/public/model" + jobs "github.com/mattermost/mattermost/server/v8/einterfaces/jobs" mock "github.com/stretchr/testify/mock" + + model "github.com/mattermost/mattermost/server/public/model" ) // DataRetentionJobInterface is an autogenerated mock type for the DataRetentionJobInterface type @@ -15,15 +17,15 @@ type DataRetentionJobInterface struct { } // MakeScheduler provides a mock function with given fields: -func (_m *DataRetentionJobInterface) MakeScheduler() model.Scheduler { +func (_m *DataRetentionJobInterface) MakeScheduler() jobs.Scheduler { ret := _m.Called() - var r0 model.Scheduler - if rf, ok := ret.Get(0).(func() model.Scheduler); ok { + var r0 jobs.Scheduler + if rf, ok := ret.Get(0).(func() jobs.Scheduler); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(model.Scheduler) + r0 = ret.Get(0).(jobs.Scheduler) } } diff --git a/server/einterfaces/mocks/ElasticsearchAggregatorInterface.go b/server/einterfaces/mocks/ElasticsearchAggregatorInterface.go index 09a59bdaa3..0f155f391b 100644 --- a/server/einterfaces/mocks/ElasticsearchAggregatorInterface.go +++ b/server/einterfaces/mocks/ElasticsearchAggregatorInterface.go @@ -5,8 +5,10 @@ package mocks import ( - model "github.com/mattermost/mattermost/server/public/model" + jobs "github.com/mattermost/mattermost/server/v8/einterfaces/jobs" mock "github.com/stretchr/testify/mock" + + model "github.com/mattermost/mattermost/server/public/model" ) // ElasticsearchAggregatorInterface is an autogenerated mock type for the ElasticsearchAggregatorInterface type @@ -15,15 +17,15 @@ type ElasticsearchAggregatorInterface struct { } // MakeScheduler provides a mock function with given fields: -func (_m 
*ElasticsearchAggregatorInterface) MakeScheduler() model.Scheduler { +func (_m *ElasticsearchAggregatorInterface) MakeScheduler() jobs.Scheduler { ret := _m.Called() - var r0 model.Scheduler - if rf, ok := ret.Get(0).(func() model.Scheduler); ok { + var r0 jobs.Scheduler + if rf, ok := ret.Get(0).(func() jobs.Scheduler); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(model.Scheduler) + r0 = ret.Get(0).(jobs.Scheduler) } } diff --git a/server/einterfaces/mocks/LdapInterface.go b/server/einterfaces/mocks/LdapInterface.go index 8ea7b81a1b..c266abdc75 100644 --- a/server/einterfaces/mocks/LdapInterface.go +++ b/server/einterfaces/mocks/LdapInterface.go @@ -334,25 +334,25 @@ func (_m *LdapInterface) RunTest() *model.AppError { return r0 } -// StartSynchronizeJob provides a mock function with given fields: waitForJobToFinish, includeRemovedMembers -func (_m *LdapInterface) StartSynchronizeJob(waitForJobToFinish bool, includeRemovedMembers bool) (*model.Job, *model.AppError) { - ret := _m.Called(waitForJobToFinish, includeRemovedMembers) +// StartSynchronizeJob provides a mock function with given fields: c, waitForJobToFinish, includeRemovedMembers +func (_m *LdapInterface) StartSynchronizeJob(c *request.Context, waitForJobToFinish bool, includeRemovedMembers bool) (*model.Job, *model.AppError) { + ret := _m.Called(c, waitForJobToFinish, includeRemovedMembers) var r0 *model.Job var r1 *model.AppError - if rf, ok := ret.Get(0).(func(bool, bool) (*model.Job, *model.AppError)); ok { - return rf(waitForJobToFinish, includeRemovedMembers) + if rf, ok := ret.Get(0).(func(*request.Context, bool, bool) (*model.Job, *model.AppError)); ok { + return rf(c, waitForJobToFinish, includeRemovedMembers) } - if rf, ok := ret.Get(0).(func(bool, bool) *model.Job); ok { - r0 = rf(waitForJobToFinish, includeRemovedMembers) + if rf, ok := ret.Get(0).(func(*request.Context, bool, bool) *model.Job); ok { + r0 = rf(c, waitForJobToFinish, includeRemovedMembers) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*model.Job) } } - if rf, ok := ret.Get(1).(func(bool, bool) *model.AppError); ok { - r1 = rf(waitForJobToFinish, includeRemovedMembers) + if rf, ok := ret.Get(1).(func(*request.Context, bool, bool) *model.AppError); ok { + r1 = rf(c, waitForJobToFinish, includeRemovedMembers) } else { if ret.Get(1) != nil { r1 = ret.Get(1).(*model.AppError) diff --git a/server/einterfaces/mocks/LdapSyncInterface.go b/server/einterfaces/mocks/LdapSyncInterface.go index c5329297cb..e5f4ae9ada 100644 --- a/server/einterfaces/mocks/LdapSyncInterface.go +++ b/server/einterfaces/mocks/LdapSyncInterface.go @@ -5,8 +5,10 @@ package mocks import ( - model "github.com/mattermost/mattermost/server/public/model" + jobs "github.com/mattermost/mattermost/server/v8/einterfaces/jobs" mock "github.com/stretchr/testify/mock" + + model "github.com/mattermost/mattermost/server/public/model" ) // LdapSyncInterface is an autogenerated mock type for the LdapSyncInterface type @@ -15,15 +17,15 @@ type LdapSyncInterface struct { } // MakeScheduler provides a mock function with given fields: -func (_m *LdapSyncInterface) MakeScheduler() model.Scheduler { +func (_m *LdapSyncInterface) MakeScheduler() jobs.Scheduler { ret := _m.Called() - var r0 model.Scheduler - if rf, ok := ret.Get(0).(func() model.Scheduler); ok { + var r0 jobs.Scheduler + if rf, ok := ret.Get(0).(func() jobs.Scheduler); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(model.Scheduler) + r0 = ret.Get(0).(jobs.Scheduler) } } diff --git 
a/server/einterfaces/mocks/MessageExportInterface.go b/server/einterfaces/mocks/MessageExportInterface.go index e2066bcda2..06571fe105 100644 --- a/server/einterfaces/mocks/MessageExportInterface.go +++ b/server/einterfaces/mocks/MessageExportInterface.go @@ -5,11 +5,9 @@ package mocks import ( - context "context" - - mock "github.com/stretchr/testify/mock" - model "github.com/mattermost/mattermost/server/public/model" + request "github.com/mattermost/mattermost/server/public/shared/request" + mock "github.com/stretchr/testify/mock" ) // MessageExportInterface is an autogenerated mock type for the MessageExportInterface type @@ -43,25 +41,25 @@ func (_m *MessageExportInterface) RunExport(format string, since int64, limit in return r0, r1 } -// StartSynchronizeJob provides a mock function with given fields: ctx, exportFromTimestamp -func (_m *MessageExportInterface) StartSynchronizeJob(ctx context.Context, exportFromTimestamp int64) (*model.Job, *model.AppError) { - ret := _m.Called(ctx, exportFromTimestamp) +// StartSynchronizeJob provides a mock function with given fields: c, exportFromTimestamp +func (_m *MessageExportInterface) StartSynchronizeJob(c *request.Context, exportFromTimestamp int64) (*model.Job, *model.AppError) { + ret := _m.Called(c, exportFromTimestamp) var r0 *model.Job var r1 *model.AppError - if rf, ok := ret.Get(0).(func(context.Context, int64) (*model.Job, *model.AppError)); ok { - return rf(ctx, exportFromTimestamp) + if rf, ok := ret.Get(0).(func(*request.Context, int64) (*model.Job, *model.AppError)); ok { + return rf(c, exportFromTimestamp) } - if rf, ok := ret.Get(0).(func(context.Context, int64) *model.Job); ok { - r0 = rf(ctx, exportFromTimestamp) + if rf, ok := ret.Get(0).(func(*request.Context, int64) *model.Job); ok { + r0 = rf(c, exportFromTimestamp) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*model.Job) } } - if rf, ok := ret.Get(1).(func(context.Context, int64) *model.AppError); ok { - r1 = rf(ctx, exportFromTimestamp) + if rf, ok := ret.Get(1).(func(*request.Context, int64) *model.AppError); ok { + r1 = rf(c, exportFromTimestamp) } else { if ret.Get(1) != nil { r1 = ret.Get(1).(*model.AppError) diff --git a/server/einterfaces/mocks/MessageExportJobInterface.go b/server/einterfaces/mocks/MessageExportJobInterface.go index b39003ac3a..e33dd12d48 100644 --- a/server/einterfaces/mocks/MessageExportJobInterface.go +++ b/server/einterfaces/mocks/MessageExportJobInterface.go @@ -5,8 +5,10 @@ package mocks import ( - model "github.com/mattermost/mattermost/server/public/model" + jobs "github.com/mattermost/mattermost/server/v8/einterfaces/jobs" mock "github.com/stretchr/testify/mock" + + model "github.com/mattermost/mattermost/server/public/model" ) // MessageExportJobInterface is an autogenerated mock type for the MessageExportJobInterface type @@ -15,15 +17,15 @@ type MessageExportJobInterface struct { } // MakeScheduler provides a mock function with given fields: -func (_m *MessageExportJobInterface) MakeScheduler() model.Scheduler { +func (_m *MessageExportJobInterface) MakeScheduler() jobs.Scheduler { ret := _m.Called() - var r0 model.Scheduler - if rf, ok := ret.Get(0).(func() model.Scheduler); ok { + var r0 jobs.Scheduler + if rf, ok := ret.Get(0).(func() jobs.Scheduler); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(model.Scheduler) + r0 = ret.Get(0).(jobs.Scheduler) } } diff --git a/server/einterfaces/mocks/ResendInvitationEmailJobInterface.go b/server/einterfaces/mocks/ResendInvitationEmailJobInterface.go deleted file mode 
100644 index 4d915ad284..0000000000 --- a/server/einterfaces/mocks/ResendInvitationEmailJobInterface.go +++ /dev/null @@ -1,47 +0,0 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. - -// Regenerate this file using `make einterfaces-mocks`. - -package mocks - -import ( - model "github.com/mattermost/mattermost/server/public/model" - mock "github.com/stretchr/testify/mock" -) - -// ResendInvitationEmailJobInterface is an autogenerated mock type for the ResendInvitationEmailJobInterface type -type ResendInvitationEmailJobInterface struct { - mock.Mock -} - -// MakeScheduler provides a mock function with given fields: -func (_m *ResendInvitationEmailJobInterface) MakeScheduler() model.Scheduler { - ret := _m.Called() - - var r0 model.Scheduler - if rf, ok := ret.Get(0).(func() model.Scheduler); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(model.Scheduler) - } - } - - return r0 -} - -// MakeWorker provides a mock function with given fields: -func (_m *ResendInvitationEmailJobInterface) MakeWorker() model.Worker { - ret := _m.Called() - - var r0 model.Worker - if rf, ok := ret.Get(0).(func() model.Worker); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(model.Worker) - } - } - - return r0 -} diff --git a/server/einterfaces/mocks/Scheduler.go b/server/einterfaces/mocks/Scheduler.go new file mode 100644 index 0000000000..24011b8bea --- /dev/null +++ b/server/einterfaces/mocks/Scheduler.go @@ -0,0 +1,91 @@ +// Code generated by mockery v2.23.2. DO NOT EDIT. + +// Regenerate this file using `make einterfaces-mocks`. + +package mocks + +import ( + model "github.com/mattermost/mattermost/server/public/model" + request "github.com/mattermost/mattermost/server/public/shared/request" + mock "github.com/stretchr/testify/mock" + + time "time" +) + +// Scheduler is an autogenerated mock type for the Scheduler type +type Scheduler struct { + mock.Mock +} + +// Enabled provides a mock function with given fields: cfg +func (_m *Scheduler) Enabled(cfg *model.Config) bool { + ret := _m.Called(cfg) + + var r0 bool + if rf, ok := ret.Get(0).(func(*model.Config) bool); ok { + r0 = rf(cfg) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// NextScheduleTime provides a mock function with given fields: cfg, now, pendingJobs, lastSuccessfulJob +func (_m *Scheduler) NextScheduleTime(cfg *model.Config, now time.Time, pendingJobs bool, lastSuccessfulJob *model.Job) *time.Time { + ret := _m.Called(cfg, now, pendingJobs, lastSuccessfulJob) + + var r0 *time.Time + if rf, ok := ret.Get(0).(func(*model.Config, time.Time, bool, *model.Job) *time.Time); ok { + r0 = rf(cfg, now, pendingJobs, lastSuccessfulJob) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*time.Time) + } + } + + return r0 +} + +// ScheduleJob provides a mock function with given fields: c, cfg, pendingJobs, lastSuccessfulJob +func (_m *Scheduler) ScheduleJob(c *request.Context, cfg *model.Config, pendingJobs bool, lastSuccessfulJob *model.Job) (*model.Job, *model.AppError) { + ret := _m.Called(c, cfg, pendingJobs, lastSuccessfulJob) + + var r0 *model.Job + var r1 *model.AppError + if rf, ok := ret.Get(0).(func(*request.Context, *model.Config, bool, *model.Job) (*model.Job, *model.AppError)); ok { + return rf(c, cfg, pendingJobs, lastSuccessfulJob) + } + if rf, ok := ret.Get(0).(func(*request.Context, *model.Config, bool, *model.Job) *model.Job); ok { + r0 = rf(c, cfg, pendingJobs, lastSuccessfulJob) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*model.Job) + } + } + + if rf, ok := 
ret.Get(1).(func(*request.Context, *model.Config, bool, *model.Job) *model.AppError); ok { + r1 = rf(c, cfg, pendingJobs, lastSuccessfulJob) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*model.AppError) + } + } + + return r0, r1 +} + +type mockConstructorTestingTNewScheduler interface { + mock.TestingT + Cleanup(func()) +} + +// NewScheduler creates a new instance of Scheduler. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewScheduler(t mockConstructorTestingTNewScheduler) *Scheduler { + mock := &Scheduler{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/server/platform/services/remotecluster/mocks_test.go b/server/platform/services/remotecluster/mocks_test.go index 37c7d24b8a..e45f70a53b 100644 --- a/server/platform/services/remotecluster/mocks_test.go +++ b/server/platform/services/remotecluster/mocks_test.go @@ -5,6 +5,7 @@ package remotecluster import ( "context" + "testing" "github.com/mattermost/mattermost/server/public/model" "github.com/mattermost/mattermost/server/public/plugin/plugintest/mock" @@ -20,12 +21,12 @@ type mockServer struct { user *model.User } -func newMockServer(remotes []*model.RemoteCluster) *mockServer { - testLogger := mlog.CreateConsoleTestLogger(true, mlog.LvlDebug) +func newMockServer(t *testing.T, remotes []*model.RemoteCluster) *mockServer { + logger := mlog.CreateConsoleTestLogger(t) return &mockServer{ remotes: remotes, - logger: testLogger, + logger: logger, } } @@ -59,4 +60,3 @@ func (ms *mockServer) GetStore() store.Store { storeMock.On("User").Return(userStoreMock) return storeMock } -func (ms *mockServer) Shutdown() { ms.logger.Shutdown() } diff --git a/server/platform/services/remotecluster/ping_test.go b/server/platform/services/remotecluster/ping_test.go index 73978b03bb..df729683bb 100644 --- a/server/platform/services/remotecluster/ping_test.go +++ b/server/platform/services/remotecluster/ping_test.go @@ -66,8 +66,7 @@ func TestPing(t *testing.T) { })) defer ts.Close() - mockServer := newMockServer(makeRemoteClusters(NumRemotes, ts.URL)) - defer mockServer.Shutdown() + mockServer := newMockServer(t, makeRemoteClusters(NumRemotes, ts.URL)) service, err := NewRemoteClusterService(mockServer) require.NoError(t, err) @@ -116,8 +115,7 @@ func TestPing(t *testing.T) { })) defer ts.Close() - mockServer := newMockServer(makeRemoteClusters(NumRemotes, ts.URL)) - defer mockServer.Shutdown() + mockServer := newMockServer(t, makeRemoteClusters(NumRemotes, ts.URL)) service, err := NewRemoteClusterService(mockServer) require.NoError(t, err) diff --git a/server/platform/services/remotecluster/send_test.go b/server/platform/services/remotecluster/send_test.go index 9d3a392173..360d557c62 100644 --- a/server/platform/services/remotecluster/send_test.go +++ b/server/platform/services/remotecluster/send_test.go @@ -82,8 +82,7 @@ func TestBroadcastMsg(t *testing.T) { })) defer ts.Close() - mockServer := newMockServer(makeRemoteClusters(NumRemotes, ts.URL)) - defer mockServer.Shutdown() + mockServer := newMockServer(t, makeRemoteClusters(NumRemotes, ts.URL)) service, err := NewRemoteClusterService(mockServer) require.NoError(t, err) @@ -139,8 +138,7 @@ func TestBroadcastMsg(t *testing.T) { })) defer ts.Close() - mockServer := newMockServer(makeRemoteClusters(NumRemotes, ts.URL)) - defer mockServer.Shutdown() + mockServer := newMockServer(t, makeRemoteClusters(NumRemotes, ts.URL)) service, err := NewRemoteClusterService(mockServer) 
 	require.NoError(t, err)
diff --git a/server/platform/services/remotecluster/sendprofileImage_test.go b/server/platform/services/remotecluster/sendprofileImage_test.go
index a200297332..ab47234796 100644
--- a/server/platform/services/remotecluster/sendprofileImage_test.go
+++ b/server/platform/services/remotecluster/sendprofileImage_test.go
@@ -100,8 +100,7 @@ func TestService_sendProfileImageToRemote(t *testing.T) {
 	provider := testImageProvider{}
 
-	mockServer := newMockServer(makeRemoteClusters(NumRemotes, ts.URL))
-	defer mockServer.Shutdown()
+	mockServer := newMockServer(t, makeRemoteClusters(NumRemotes, ts.URL))
 	mockServer.SetUser(user)
 
 	service, err := NewRemoteClusterService(mockServer)
 	require.NoError(t, err)
diff --git a/server/platform/services/remotecluster/service_test.go b/server/platform/services/remotecluster/service_test.go
index 9e20377c75..9773a24444 100644
--- a/server/platform/services/remotecluster/service_test.go
+++ b/server/platform/services/remotecluster/service_test.go
@@ -29,8 +29,7 @@ func TestService_AddTopicListener(t *testing.T) {
 		return nil
 	}
 
-	mockServer := newMockServer(makeRemoteClusters(NumRemotes, ""))
-	defer mockServer.Shutdown()
+	mockServer := newMockServer(t, makeRemoteClusters(NumRemotes, ""))
 
 	service, err := NewRemoteClusterService(mockServer)
 	require.NoError(t, err)
diff --git a/server/platform/services/searchengine/bleveengine/indexer/indexing_job.go b/server/platform/services/searchengine/bleveengine/indexer/indexing_job.go
index 58110842b9..db73517f27 100644
--- a/server/platform/services/searchengine/bleveengine/indexer/indexing_job.go
+++ b/server/platform/services/searchengine/bleveengine/indexer/indexing_job.go
@@ -12,6 +12,7 @@ import (
 
 	"github.com/mattermost/mattermost/server/public/model"
 	"github.com/mattermost/mattermost/server/public/shared/mlog"
+	"github.com/mattermost/mattermost/server/public/shared/request"
 	"github.com/mattermost/mattermost/server/v8/channels/jobs"
 	"github.com/mattermost/mattermost/server/v8/platform/services/searchengine/bleveengine"
 )
@@ -31,20 +32,23 @@ type BleveIndexerWorker struct {
 	stopped   chan bool
 	jobs      chan model.Job
 	jobServer *jobs.JobServer
+	logger    mlog.LoggerIFace
 
 	engine *bleveengine.BleveEngine
 
 	closed int32
 }
 
-func MakeWorker(jobServer *jobs.JobServer, engine *bleveengine.BleveEngine) model.Worker {
+func MakeWorker(jobServer *jobs.JobServer, engine *bleveengine.BleveEngine) *BleveIndexerWorker {
 	if engine == nil {
 		return nil
 	}
+	const workerName = "BleveIndexer"
 	return &BleveIndexerWorker{
-		name:      "BleveIndexer",
+		name:      workerName,
 		stop:      make(chan struct{}),
 		stopped:   make(chan bool, 1),
 		jobs:      make(chan model.Job),
 		jobServer: jobServer,
+		logger:    jobServer.Logger().With(mlog.String("workername", workerName)),
 		engine:    engine,
 	}
 }
@@ -97,20 +101,22 @@ func (worker *BleveIndexerWorker) Run() {
 	if atomic.CompareAndSwapInt32(&worker.closed, 1, 0) {
 		worker.stop = make(chan struct{})
 	}
-	mlog.Debug("Worker Started", mlog.String("workername", worker.name))
+	worker.logger.Debug("Worker Started")
 
 	defer func() {
-		mlog.Debug("Worker: Finished", mlog.String("workername", worker.name))
+		worker.logger.Debug("Worker: Finished")
 		worker.stopped <- true
 	}()
 
 	for {
 		select {
 		case <-worker.stop:
-			mlog.Debug("Worker: Received stop signal", mlog.String("workername", worker.name))
+			worker.logger.Debug("Worker: Received stop signal")
 			return
 		case job := <-worker.jobs:
-			mlog.Debug("Worker: Received a new candidate job.", mlog.String("workername", worker.name))
+			job.Logger = job.Logger.With(mlog.String("workername", worker.name))
+
+			job.Logger.Debug("Worker: Received a new candidate job.")
 			worker.DoJob(&job)
 		}
 	}
@@ -121,7 +127,7 @@ func (worker *BleveIndexerWorker) Stop() {
 	if !atomic.CompareAndSwapInt32(&worker.closed, 0, 1) {
 		return
 	}
-	mlog.Debug("Worker Stopping", mlog.String("workername", worker.name))
+	worker.logger.Debug("Worker Stopping")
 	close(worker.stop)
 	<-worker.stopped
 }
@@ -129,19 +135,19 @@ func (worker *BleveIndexerWorker) Stop() {
 func (worker *BleveIndexerWorker) DoJob(job *model.Job) {
 	claimed, err := worker.jobServer.ClaimJob(job)
 	if err != nil {
-		mlog.Warn("Worker: Error occurred while trying to claim job", mlog.String("workername", worker.name), mlog.String("job_id", job.Id), mlog.Err(err))
+		job.Logger.Warn("Worker: Error occurred while trying to claim job", mlog.Err(err))
 		return
 	}
 	if !claimed {
 		return
 	}
 
-	mlog.Info("Worker: Indexing job claimed by worker", mlog.String("workername", worker.name), mlog.String("job_id", job.Id))
+	job.Logger.Info("Worker: Indexing job claimed by worker")
 
 	if !worker.engine.IsActive() {
 		appError := model.NewAppError("BleveIndexerWorker", "bleveengine.indexer.do_job.engine_inactive", nil, "", http.StatusInternalServerError)
 		if err := worker.jobServer.SetJobError(job, appError); err != nil {
-			mlog.Error("Worker: Failed to run job as ")
+			job.Logger.Error("Worker: Failed to run job as the engine is inactive", mlog.Err(err))
 		}
 		return
 	}
@@ -160,10 +166,10 @@ func (worker *BleveIndexerWorker) DoJob(job *model.Job) {
 	if startString, ok := job.Data["start_time"]; ok {
 		startInt, err := strconv.ParseInt(startString, 10, 64)
 		if err != nil {
-			mlog.Error("Worker: Failed to parse start_time for job", mlog.String("workername", worker.name), mlog.String("start_time", startString), mlog.String("job_id", job.Id), mlog.Err(err))
+			job.Logger.Error("Worker: Failed to parse start_time for job", mlog.String("start_time", startString), mlog.Err(err))
 			appError := model.NewAppError("BleveIndexerWorker", "bleveengine.indexer.do_job.parse_start_time.error", nil, "", http.StatusInternalServerError).Wrap(err)
 			if err := worker.jobServer.SetJobError(job, appError); err != nil {
-				mlog.Error("Worker: Failed to set job error", mlog.String("workername", worker.name), mlog.String("job_id", job.Id), mlog.Err(err), mlog.NamedErr("set_error", appError))
+				job.Logger.Error("Worker: Failed to set job error", mlog.Err(err), mlog.NamedErr("set_error", appError))
 			}
 			return
 		}
@@ -173,10 +179,10 @@ func (worker *BleveIndexerWorker) DoJob(job *model.Job) {
 		// A user or a channel may be created before any post.
 		oldestEntityCreationTime, err := worker.jobServer.Store.Post().GetOldestEntityCreationTime()
 		if err != nil {
-			mlog.Error("Worker: Failed to fetch oldest entity for job.", mlog.String("workername", worker.name), mlog.String("job_id", job.Id), mlog.String("start_time", startString), mlog.Err(err))
+			job.Logger.Error("Worker: Failed to fetch oldest entity for job.", mlog.String("start_time", startString), mlog.Err(err))
 			appError := model.NewAppError("BleveIndexerWorker", "bleveengine.indexer.do_job.get_oldest_entity.error", nil, "", http.StatusInternalServerError).Wrap(err)
 			if err := worker.jobServer.SetJobError(job, appError); err != nil {
-				mlog.Error("Worker: Failed to set job error", mlog.String("workername", worker.name), mlog.String("job_id", job.Id), mlog.Err(err), mlog.NamedErr("set_error", appError))
+				job.Logger.Error("Worker: Failed to set job error", mlog.Err(err), mlog.NamedErr("set_error", appError))
 			}
 			return
 		}
@@ -187,10 +193,10 @@ func (worker *BleveIndexerWorker) DoJob(job *model.Job) {
 	if endString, ok := job.Data["end_time"]; ok {
 		endInt, err := strconv.ParseInt(endString, 10, 64)
 		if err != nil {
-			mlog.Error("Worker: Failed to parse end_time for job", mlog.String("workername", worker.name), mlog.String("job_id", job.Id), mlog.String("end_time", endString), mlog.Err(err))
+			job.Logger.Error("Worker: Failed to parse end_time for job", mlog.String("end_time", endString), mlog.Err(err))
 			appError := model.NewAppError("BleveIndexerWorker", "bleveengine.indexer.do_job.parse_end_time.error", nil, "", http.StatusInternalServerError).Wrap(err)
 			if err := worker.jobServer.SetJobError(job, appError); err != nil {
-				mlog.Error("Worker: Failed to set job errorv", mlog.String("workername", worker.name), mlog.String("job_id", job.Id), mlog.Err(err), mlog.NamedErr("set_error", appError))
+				job.Logger.Error("Worker: Failed to set job error", mlog.Err(err), mlog.NamedErr("set_error", appError))
 			}
 			return
 		}
@@ -213,7 +219,7 @@ func (worker *BleveIndexerWorker) DoJob(job *model.Job) {
 	// Counting all posts may fail or timeout when the posts table is large. If this happens, log a warning, but carry
 	// on with the indexing job anyway. The only issue is that the progress % reporting will be inaccurate.
 	if count, err := worker.jobServer.Store.Post().AnalyticsPostCount(&model.PostCountOptions{}); err != nil {
-		mlog.Warn("Worker: Failed to fetch total post count for job. An estimated value will be used for progress reporting.", mlog.String("workername", worker.name), mlog.String("job_id", job.Id), mlog.Err(err))
+		job.Logger.Warn("Worker: Failed to fetch total post count for job. An estimated value will be used for progress reporting.", mlog.Err(err))
 		progress.TotalPostsCount = estimatedPostCount
 	} else {
 		progress.TotalPostsCount = count
@@ -221,7 +227,7 @@ func (worker *BleveIndexerWorker) DoJob(job *model.Job) {
 
 	// Same possible fail as above can happen when counting channels
 	if count, err := worker.jobServer.Store.Channel().AnalyticsTypeCount("", ""); err != nil {
-		mlog.Warn("Worker: Failed to fetch total channel count for job. An estimated value will be used for progress reporting.", mlog.String("workername", worker.name), mlog.String("job_id", job.Id), mlog.Err(err))
+		job.Logger.Warn("Worker: Failed to fetch total channel count for job. An estimated value will be used for progress reporting.", mlog.Err(err))
 		progress.TotalChannelsCount = estimatedChannelCount
 	} else {
 		progress.TotalChannelsCount = count
@@ -232,7 +238,7 @@ func (worker *BleveIndexerWorker) DoJob(job *model.Job) {
 		IncludeBotAccounts: true, // This actually doesn't join with the bots table
 		// since ExcludeRegularUsers is set to false
 	}); err != nil {
-		mlog.Warn("Worker: Failed to fetch total user count for job. An estimated value will be used for progress reporting.", mlog.String("workername", worker.name), mlog.String("job_id", job.Id), mlog.Err(err))
+		job.Logger.Warn("Worker: Failed to fetch total user count for job. An estimated value will be used for progress reporting.", mlog.Err(err))
 		progress.TotalUsersCount = estimatedUserCount
 	} else {
 		progress.TotalUsersCount = count
@@ -241,40 +247,41 @@ func (worker *BleveIndexerWorker) DoJob(job *model.Job) {
 	// Counting all files may fail or timeout when the file_info table is large. If this happens, log a warning, but carry
 	// on with the indexing job anyway. The only issue is that the progress % reporting will be inaccurate.
 	if count, err := worker.jobServer.Store.FileInfo().CountAll(); err != nil {
-		mlog.Warn("Worker: Failed to fetch total file info count for job. An estimated value will be used for progress reporting.", mlog.String("workername", worker.name), mlog.String("job_id", job.Id), mlog.Err(err))
+		job.Logger.Warn("Worker: Failed to fetch total file info count for job. An estimated value will be used for progress reporting.", mlog.Err(err))
 		progress.TotalFilesCount = estimatedFilesCount
 	} else {
 		progress.TotalFilesCount = count
 	}
 
+	cancelContext := request.EmptyContext(worker.logger)
 	cancelCtx, cancelCancelWatcher := context.WithCancel(context.Background())
 	cancelWatcherChan := make(chan struct{}, 1)
-	go worker.jobServer.CancellationWatcher(cancelCtx, job.Id, cancelWatcherChan)
-
+	cancelContext.SetContext(cancelCtx)
+	go worker.jobServer.CancellationWatcher(cancelContext, job.Id, cancelWatcherChan)
 	defer cancelCancelWatcher()
 
 	for {
 		select {
 		case <-cancelWatcherChan:
-			mlog.Info("Worker: Indexing job has been canceled via CancellationWatcher", mlog.String("workername", worker.name), mlog.String("job_id", job.Id))
+			job.Logger.Info("Worker: Indexing job has been canceled via CancellationWatcher")
 			if err := worker.jobServer.SetJobCanceled(job); err != nil {
-				mlog.Error("Worker: Failed to mark job as cancelled", mlog.String("workername", worker.name), mlog.String("job_id", job.Id), mlog.Err(err))
+				job.Logger.Error("Worker: Failed to mark job as cancelled", mlog.Err(err))
			}
 			return
 		case <-worker.stop:
-			mlog.Info("Worker: Indexing has been canceled via Worker Stop", mlog.String("workername", worker.name), mlog.String("job_id", job.Id))
+			job.Logger.Info("Worker: Indexing has been canceled via Worker Stop")
 			if err := worker.jobServer.SetJobCanceled(job); err != nil {
-				mlog.Error("Worker: Failed to mark job as canceled", mlog.String("workername", worker.name), mlog.String("job_id", job.Id), mlog.Err(err))
+				job.Logger.Error("Worker: Failed to mark job as canceled", mlog.Err(err))
 			}
 			return
 		case <-time.After(timeBetweenBatches):
 			var err *model.AppError
-			if progress, err = worker.IndexBatch(progress); err != nil {
-				mlog.Error("Worker: Failed to index batch for job", mlog.String("workername", worker.name), mlog.String("job_id", job.Id), mlog.Err(err))
+			if progress, err = worker.IndexBatch(job.Logger, progress); err != nil {
+				job.Logger.Error("Worker: Failed to index batch for job", mlog.Err(err))
 				if err2 := worker.jobServer.SetJobError(job, err); err2 != nil {
-					mlog.Error("Worker: Failed to set job error", mlog.String("workername", worker.name), mlog.String("job_id", job.Id), mlog.Err(err2), mlog.NamedErr("set_error", err))
+					job.Logger.Error("Worker: Failed to set job error", mlog.Err(err2), mlog.NamedErr("set_error", err))
 				}
 				return
 			}
@@ -293,44 +300,44 @@ func (worker *BleveIndexerWorker) DoJob(job *model.Job) {
 			job.Data["end_time"] = strconv.FormatInt(progress.EndAtTime, 10)
 
 			if err := worker.jobServer.SetJobProgress(job, progress.CurrentProgress()); err != nil {
-				mlog.Error("Worker: Failed to set progress for job", mlog.String("workername", worker.name), mlog.String("job_id", job.Id), mlog.Err(err))
+				job.Logger.Error("Worker: Failed to set progress for job", mlog.Err(err))
 				if err2 := worker.jobServer.SetJobError(job, err); err2 != nil {
-					mlog.Error("Worker: Failed to set error for job", mlog.String("workername", worker.name), mlog.String("job_id", job.Id), mlog.Err(err2), mlog.NamedErr("set_error", err))
+					job.Logger.Error("Worker: Failed to set error for job", mlog.Err(err2), mlog.NamedErr("set_error", err))
 				}
 				return
 			}
 
 			if progress.IsDone() {
 				if err := worker.jobServer.SetJobSuccess(job); err != nil {
-					mlog.Error("Worker: Failed to set success for job", mlog.String("workername", worker.name), mlog.String("job_id", job.Id), mlog.Err(err))
+					job.Logger.Error("Worker: Failed to set success for job", mlog.Err(err))
 					if err2 := worker.jobServer.SetJobError(job, err); err2 != nil {
-						mlog.Error("Worker: Failed to set error for job", mlog.String("workername", worker.name), mlog.String("job_id", job.Id), mlog.Err(err2), mlog.NamedErr("set_error", err))
+						job.Logger.Error("Worker: Failed to set error for job", mlog.Err(err2), mlog.NamedErr("set_error", err))
 					}
 				}
-				mlog.Info("Worker: Indexing job finished successfully", mlog.String("workername", worker.name), mlog.String("job_id", job.Id))
+				job.Logger.Info("Worker: Indexing job finished successfully")
 				return
 			}
 		}
 	}
 }
 
-func (worker *BleveIndexerWorker) IndexBatch(progress IndexingProgress) (IndexingProgress, *model.AppError) {
+func (worker *BleveIndexerWorker) IndexBatch(logger mlog.LoggerIFace, progress IndexingProgress) (IndexingProgress, *model.AppError) {
 	if !progress.DonePosts {
-		return worker.IndexPostsBatch(progress)
+		return worker.IndexPostsBatch(logger, progress)
 	}
 	if !progress.DoneChannels {
-		return worker.IndexChannelsBatch(progress)
+		return worker.IndexChannelsBatch(logger, progress)
 	}
 	if !progress.DoneUsers {
-		return worker.IndexUsersBatch(progress)
+		return worker.IndexUsersBatch(logger, progress)
 	}
 	if !progress.DoneFiles {
-		return worker.IndexFilesBatch(progress)
+		return worker.IndexFilesBatch(logger, progress)
 	}
 	return progress, model.NewAppError("BleveIndexerWorker", "bleveengine.indexer.index_batch.nothing_left_to_index.error", nil, "", http.StatusInternalServerError)
 }
 
-func (worker *BleveIndexerWorker) IndexPostsBatch(progress IndexingProgress) (IndexingProgress, *model.AppError) {
+func (worker *BleveIndexerWorker) IndexPostsBatch(logger mlog.LoggerIFace, progress IndexingProgress) (IndexingProgress, *model.AppError) {
 	var posts []*model.PostForIndexing
 
 	tries := 0
@@ -341,7 +348,7 @@ func (worker *BleveIndexerWorker) IndexPostsBatch(progress IndexingProgress) (In
 		if tries >= 10 {
 			return progress, model.NewAppError("IndexPostsBatch", "app.post.get_posts_batch_for_indexing.get.app_error", nil, "", http.StatusInternalServerError).Wrap(err)
 		}
-		mlog.Warn("Failed to get posts batch for indexing. Retrying.", mlog.Err(err))
+		logger.Warn("Failed to get posts batch for indexing. Retrying.", mlog.Err(err))
 
 		// Wait a bit before trying again.
 		time.Sleep(15 * time.Second)
@@ -398,7 +405,7 @@ func (worker *BleveIndexerWorker) BulkIndexPosts(posts []*model.PostForIndexing,
 	return &posts[len(posts)-1].Post, nil
 }
 
-func (worker *BleveIndexerWorker) IndexFilesBatch(progress IndexingProgress) (IndexingProgress, *model.AppError) {
+func (worker *BleveIndexerWorker) IndexFilesBatch(logger mlog.LoggerIFace, progress IndexingProgress) (IndexingProgress, *model.AppError) {
 	var files []*model.FileForIndexing
 
 	tries := 0
@@ -409,7 +416,7 @@ func (worker *BleveIndexerWorker) IndexFilesBatch(progress IndexingProgress) (In
 		if tries >= 10 {
 			return progress, model.NewAppError("IndexFilesBatch", "app.post.get_files_batch_for_indexing.get.app_error", nil, "", http.StatusInternalServerError).Wrap(err)
 		}
-		mlog.Warn("Failed to get files batch for indexing. Retrying.", mlog.Err(err))
+		logger.Warn("Failed to get files batch for indexing. Retrying.", mlog.Err(err))
 
 		// Wait a bit before trying again.
 		time.Sleep(15 * time.Second)
@@ -465,7 +472,7 @@ func (worker *BleveIndexerWorker) BulkIndexFiles(files []*model.FileForIndexing,
 	return &files[len(files)-1].FileInfo, nil
 }
 
-func (worker *BleveIndexerWorker) IndexChannelsBatch(progress IndexingProgress) (IndexingProgress, *model.AppError) {
+func (worker *BleveIndexerWorker) IndexChannelsBatch(logger mlog.LoggerIFace, progress IndexingProgress) (IndexingProgress, *model.AppError) {
 	var channels []*model.Channel
 
 	tries := 0
@@ -477,7 +484,7 @@ func (worker *BleveIndexerWorker) IndexChannelsBatch(progress IndexingProgress)
 			return progress, model.NewAppError("BleveIndexerWorker.IndexChannelsBatch", "app.channel.get_channels_batch_for_indexing.get.app_error", nil, "", http.StatusInternalServerError).Wrap(nErr)
 		}
-		mlog.Warn("Failed to get channels batch for indexing. Retrying.", mlog.Err(nErr))
+		logger.Warn("Failed to get channels batch for indexing. Retrying.", mlog.Err(nErr))
 
 		// Wait a bit before trying again.
 		time.Sleep(15 * time.Second)
@@ -491,7 +498,7 @@ func (worker *BleveIndexerWorker) IndexChannelsBatch(progress IndexingProgress)
 		return progress, nil
 	}
 
-	lastChannel, err := worker.BulkIndexChannels(channels, progress)
+	lastChannel, err := worker.BulkIndexChannels(logger, channels, progress)
 	if err != nil {
 		return progress, err
 	}
@@ -511,7 +518,7 @@ func (worker *BleveIndexerWorker) IndexChannelsBatch(progress IndexingProgress)
 	return progress, nil
 }
 
-func (worker *BleveIndexerWorker) BulkIndexChannels(channels []*model.Channel, progress IndexingProgress) (*model.Channel, *model.AppError) {
+func (worker *BleveIndexerWorker) BulkIndexChannels(logger mlog.LoggerIFace, channels []*model.Channel, progress IndexingProgress) (*model.Channel, *model.AppError) {
 	batch := worker.engine.ChannelIndex.NewBatch()
 
 	for _, channel := range channels {
@@ -547,7 +554,7 @@ func (worker *BleveIndexerWorker) BulkIndexChannels(channels []*model.Channel, p
 	return channels[len(channels)-1], nil
 }
 
-func (worker *BleveIndexerWorker) IndexUsersBatch(progress IndexingProgress) (IndexingProgress, *model.AppError) {
+func (worker *BleveIndexerWorker) IndexUsersBatch(logger mlog.LoggerIFace, progress IndexingProgress) (IndexingProgress, *model.AppError) {
 	var users []*model.UserForIndexing
 
 	tries := 0
@@ -556,7 +563,7 @@ func (worker *BleveIndexerWorker) IndexUsersBatch(progress IndexingProgress) (In
 		if tries >= 10 {
 			return progress, model.NewAppError("IndexUsersBatch", "app.user.get_users_batch_for_indexing.get_users.app_error", nil, "", http.StatusInternalServerError).Wrap(err)
 		}
-		mlog.Warn("Failed to get users batch for indexing. Retrying.", mlog.Err(err))
+		logger.Warn("Failed to get users batch for indexing. Retrying.", mlog.Err(err))
 
 		// Wait a bit before trying again.
 		time.Sleep(15 * time.Second)
@@ -573,7 +580,7 @@ func (worker *BleveIndexerWorker) IndexUsersBatch(progress IndexingProgress) (In
 		return progress, nil
 	}
 
-	lastUser, err := worker.BulkIndexUsers(users, progress)
+	lastUser, err := worker.BulkIndexUsers(logger, users, progress)
 	if err != nil {
 		return progress, err
 	}
@@ -592,7 +599,7 @@ func (worker *BleveIndexerWorker) IndexUsersBatch(progress IndexingProgress) (In
 	return progress, nil
 }
 
-func (worker *BleveIndexerWorker) BulkIndexUsers(users []*model.UserForIndexing, progress IndexingProgress) (*model.UserForIndexing, *model.AppError) {
+func (worker *BleveIndexerWorker) BulkIndexUsers(logger mlog.LoggerIFace, users []*model.UserForIndexing, progress IndexingProgress) (*model.UserForIndexing, *model.AppError) {
 	batch := worker.engine.UserIndex.NewBatch()
 
 	for _, user := range users {
diff --git a/server/platform/services/searchengine/bleveengine/indexer/indexing_job_test.go b/server/platform/services/searchengine/bleveengine/indexer/indexing_job_test.go
index 50f434d367..1df114a5be 100644
--- a/server/platform/services/searchengine/bleveengine/indexer/indexing_job_test.go
+++ b/server/platform/services/searchengine/bleveengine/indexer/indexing_job_test.go
@@ -11,6 +11,7 @@ import (
 	"github.com/stretchr/testify/require"
 
 	"github.com/mattermost/mattermost/server/public/model"
+	"github.com/mattermost/mattermost/server/public/shared/mlog"
 	"github.com/mattermost/mattermost/server/v8/channels/jobs"
 	"github.com/mattermost/mattermost/server/v8/channels/store/storetest"
 	"github.com/mattermost/mattermost/server/v8/channels/utils/testutils"
@@ -22,12 +23,14 @@ func TestBleveIndexer(t *testing.T) {
 	defer mockStore.AssertExpectations(t)
 
 	t.Run("Call GetOldestEntityCreationTime for the first indexing call", func(t *testing.T) {
+		logger := mlog.CreateConsoleTestLogger(t)
 		job := &model.Job{
 			Id:       model.NewId(),
 			CreateAt: model.GetMillis(),
 			Status:   model.JobStatusPending,
 			Type:     model.JobTypeBlevePostIndexing,
 		}
+		job.InitLogger(logger)
 
 		mockStore.JobStore.On("UpdateStatusOptimistically", job.Id, model.JobStatusPending, model.JobStatusInProgress).Return(true, nil)
 		mockStore.JobStore.On("UpdateOptimistically", job, model.JobStatusInProgress).Return(true, nil)
diff --git a/server/platform/services/slackimport/slackimport_test.go b/server/platform/services/slackimport/slackimport_test.go
index 821abf532f..553b6b1a68 100644
--- a/server/platform/services/slackimport/slackimport_test.go
+++ b/server/platform/services/slackimport/slackimport_test.go
@@ -339,8 +339,7 @@ func TestOldImportChannel(t *testing.T) {
 	store := &mocks.Store{}
 	config := &model.Config{}
 	config.SetDefaults()
-	ctx := request.EmptyContext(nil)
-	ctx.SetLogger(mlog.CreateConsoleTestLogger(true, mlog.LvlDebug))
+	ctx := request.EmptyContext(mlog.CreateConsoleTestLogger(t))
 
 	t.Run("No panic on direct channel", func(t *testing.T) {
 		// ch := th.CreateDmChannel(u1)
diff --git a/server/platform/shared/filestore/filesstore_test.go b/server/platform/shared/filestore/filesstore_test.go
index ddf5f6eb89..83bdd4df0c 100644
--- a/server/platform/shared/filestore/filesstore_test.go
+++ b/server/platform/shared/filestore/filesstore_test.go
@@ -36,8 +36,7 @@ type FileBackendTestSuite struct {
 func TestLocalFileBackendTestSuite(t *testing.T) {
 	// Setup a global logger to catch tests logging outside of app context
 	// The global logger will be stomped by apps initializing but that's fine for testing. Ideally this won't happen.
-	logger := mlog.CreateConsoleTestLogger(true, mlog.LvlError)
-	defer logger.Shutdown()
+	logger := mlog.CreateConsoleTestLogger(t)
 	mlog.InitGlobalLogger(logger)
 
diff --git a/server/public/model/job.go b/server/public/model/job.go
index 5c1605d7cf..7af6c8851c 100644
--- a/server/public/model/job.go
+++ b/server/public/model/job.go
@@ -6,6 +6,8 @@ package model
 import (
 	"net/http"
 	"time"
+
+	"github.com/mattermost/mattermost/server/public/shared/mlog"
 )
 
 const (
@@ -80,6 +82,8 @@ type Job struct {
 	Status   string    `json:"status"`
 	Progress int64     `json:"progress"`
 	Data     StringMap `json:"data"`
+
+	Logger *mlog.Logger `json:"-"`
 }
 
 func (j *Job) Auditable() map[string]interface{} {
@@ -120,15 +124,19 @@ func (j *Job) IsValid() *AppError {
 	return nil
 }
 
+// InitLogger attaches an annotated logger to a Job.
+// It should always be called after creating a new Job to ensure `Job.Logger` is set.
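+//
+// Illustrative usage only (editor-added sketch, not a call site introduced by this patch;
+// `logger` stands for any available mlog.LoggerIFace):
+//
+//	job := &Job{Id: NewId(), CreateAt: GetMillis(), Type: JobTypeBlevePostIndexing}
+//	job.InitLogger(logger)
+//	job.Logger.Info("job claimed") // record carries the job_id, job_type and create_at fields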
+func (j *Job) InitLogger(logger mlog.LoggerIFace) {
+	j.Logger = logger.With(
+		mlog.String("job_id", j.Id),
+		mlog.String("job_type", j.Type),
+		mlog.String("create_at", time.UnixMilli(j.CreateAt).String()),
+	)
+}
+
 type Worker interface {
 	Run()
 	Stop()
 	JobChannel() chan<- Job
 	IsEnabled(cfg *Config) bool
 }
-
-type Scheduler interface {
-	Enabled(cfg *Config) bool
-	NextScheduleTime(cfg *Config, now time.Time, pendingJobs bool, lastSuccessfulJob *Job) *time.Time
-	ScheduleJob(cfg *Config, pendingJobs bool, lastSuccessfulJob *Job) (*Job, *AppError)
-}
diff --git a/server/public/plugin/health_check_test.go b/server/public/plugin/health_check_test.go
index 974928ea25..b05e8be5ca 100644
--- a/server/public/plugin/health_check_test.go
+++ b/server/public/plugin/health_check_test.go
@@ -51,10 +51,8 @@ func testPluginHealthCheckSuccess(t *testing.T) {
 	require.NoError(t, err)
 
 	bundle := model.BundleInfoForPath(dir)
-	log := mlog.CreateConsoleTestLogger(true, mlog.LvlError)
-	defer log.Shutdown()
-
-	supervisor, err := newSupervisor(bundle, nil, nil, log, nil)
+	logger := mlog.CreateConsoleTestLogger(t)
+	supervisor, err := newSupervisor(bundle, nil, nil, logger, nil)
 	require.NoError(t, err)
 	require.NotNil(t, supervisor)
 	defer supervisor.Shutdown()
@@ -94,10 +92,8 @@ func testPluginHealthCheckPanic(t *testing.T) {
 	require.NoError(t, err)
 
 	bundle := model.BundleInfoForPath(dir)
-	log := mlog.CreateConsoleTestLogger(true, mlog.LvlError)
-	defer log.Shutdown()
-
-	supervisor, err := newSupervisor(bundle, nil, nil, log, nil)
+	logger := mlog.CreateConsoleTestLogger(t)
+	supervisor, err := newSupervisor(bundle, nil, nil, logger, nil)
 	require.NoError(t, err)
 	require.NotNil(t, supervisor)
 	defer supervisor.Shutdown()
diff --git a/server/public/plugin/supervisor_test.go b/server/public/plugin/supervisor_test.go
index 364d4cb01b..d9dfeb8b27 100644
--- a/server/public/plugin/supervisor_test.go
+++ b/server/public/plugin/supervisor_test.go
@@ -34,9 +34,8 @@ func testSupervisorInvalidExecutablePath(t *testing.T) {
 	os.WriteFile(filepath.Join(dir, "plugin.json"), []byte(`{"id": "foo", "server": {"executable": "/foo/../../backend.exe"}}`), 0600)
 
 	bundle := model.BundleInfoForPath(dir)
-	log := mlog.CreateConsoleTestLogger(true, mlog.LvlError)
-	defer log.Shutdown()
-	supervisor, err := newSupervisor(bundle, nil, nil, log, nil)
+	logger := mlog.CreateConsoleTestLogger(t)
+	supervisor, err := newSupervisor(bundle, nil, nil, logger, nil)
 	assert.Nil(t, supervisor)
 	assert.Error(t, err)
 }
@@ -49,9 +48,8 @@ func testSupervisorNonExistentExecutablePath(t *testing.T) {
 	os.WriteFile(filepath.Join(dir, "plugin.json"), []byte(`{"id": "foo", "server": {"executable": "thisfileshouldnotexist"}}`), 0600)
 
 	bundle := model.BundleInfoForPath(dir)
-	log := mlog.CreateConsoleTestLogger(true, mlog.LvlError)
-	defer log.Shutdown()
-	supervisor, err := newSupervisor(bundle, nil, nil, log, nil)
+	logger := mlog.CreateConsoleTestLogger(t)
+	supervisor, err := newSupervisor(bundle, nil, nil, logger, nil)
 	require.Error(t, err)
 	require.Nil(t, supervisor)
 }
@@ -75,9 +73,8 @@ func testSupervisorStartTimeout(t *testing.T) {
 	os.WriteFile(filepath.Join(dir, "plugin.json"), []byte(`{"id": "foo", "server": {"executable": "backend.exe"}}`), 0600)
 
 	bundle := model.BundleInfoForPath(dir)
-	log := mlog.CreateConsoleTestLogger(true, mlog.LvlError)
-	defer log.Shutdown()
-	supervisor, err := newSupervisor(bundle, nil, nil, log, nil)
+	logger := mlog.CreateConsoleTestLogger(t)
+	supervisor, err := newSupervisor(bundle, nil, nil, logger, nil)
 	require.Error(t, err)
 	require.Nil(t, supervisor)
 }
diff --git a/server/public/shared/mlog/mlog.go b/server/public/shared/mlog/mlog.go
index 060f3cd302..5bfcac963a 100644
--- a/server/public/shared/mlog/mlog.go
+++ b/server/public/shared/mlog/mlog.go
@@ -40,6 +40,7 @@ type LoggerIFace interface {
 	LogM([]Level, string, ...Field)
 	With(fields ...Field) *Logger
 	Flush() error
+	Sugar(fields ...Field) Sugar
 	StdLogger(level Level) *log.Logger
 }
 
diff --git a/server/public/shared/mlog/tlog.go b/server/public/shared/mlog/tlog.go
index ef8f6016a0..749e559140 100644
--- a/server/public/shared/mlog/tlog.go
+++ b/server/public/shared/mlog/tlog.go
@@ -8,6 +8,7 @@ import (
 	"io"
 	"os"
 	"sync"
+	"testing"
 
 	"github.com/mattermost/logr/v2"
 	"github.com/mattermost/logr/v2/formatters"
@@ -33,26 +34,66 @@ func AddWriterTarget(logger *Logger, w io.Writer, useJSON bool, levels ...Level)
 }
 
 // CreateConsoleTestLogger creates a logger for unit tests. Log records are output to `os.Stdout`.
-// Logs can also be mirrored to the optional `io.Writer`.
-func CreateConsoleTestLogger(useJSON bool, level Level) *Logger {
-	logger, _ := NewLogger()
+// All log levels, including trace, are enabled.
+// The returned logger is shut down when the test completes; the caller should not shut it down.
+func CreateConsoleTestLogger(tb testing.TB) *Logger {
+	tb.Helper()
+
+	logger, err := NewLogger()
+	if err != nil {
+		tb.Fatalf("failed to create logger %v", err)
+	}
 
 	filter := logr.StdFilter{
-		Lvl:        level,
+		Lvl:        LvlTrace,
 		Stacktrace: LvlPanic,
 	}
-
-	var formatter logr.Formatter
-	if useJSON {
-		formatter = &formatters.JSON{EnableCaller: true}
-	} else {
-		formatter = &formatters.Plain{EnableCaller: true}
-	}
+	formatter := &formatters.Plain{EnableCaller: true}
 	target := targets.NewWriterTarget(os.Stdout)
 
 	if err := logger.log.Logr().AddTarget(target, "_testcon", filter, formatter, 1000); err != nil {
-		panic(err)
+		tb.Fatalf("failed to add target %v", err)
 	}
+
+	tb.Cleanup(func() {
+		err := logger.Shutdown()
+		if err != nil {
+			tb.Fatalf("failed to shut down test logger %v", err)
+		}
+	})
+
+	return logger
+}
+
+// CreateTestLogger creates a logger for unit tests. Log records are output via `t.Log`.
+// All log levels, including trace, are enabled.
+// The returned logger is shut down when the test completes; the caller should not shut it down.
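+//
+// Illustrative usage only (editor-added sketch, not part of this patch's call sites):
+//
+//	func TestSomething(t *testing.T) {
+//		logger := CreateTestLogger(t)
+//		logger.Debug("this line is routed through t.Log")
+//		// no explicit Shutdown; the t.Cleanup registered below handles it
+//	}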
+func CreateTestLogger(t *testing.T) *Logger {
+	t.Helper()
+
+	logger, err := NewLogger()
+	if err != nil {
+		t.Fatalf("failed to create logger %v", err)
+	}
+
+	filter := logr.StdFilter{
+		Lvl:        LvlTrace,
+		Stacktrace: LvlPanic,
+	}
+	formatter := &formatters.Plain{EnableCaller: true}
+	target := targets.NewTestingTarget(t)
+
+	if err := logger.log.Logr().AddTarget(target, "test", filter, formatter, 1000); err != nil {
+		t.Fatalf("failed to add target %v", err)
+	}
+
+	t.Cleanup(func() {
+		err := logger.Shutdown()
+		if err != nil {
+			t.Errorf("failed to shut down test logger %v", err)
+		}
+	})
+
+	return logger
+}
diff --git a/server/public/shared/request/context.go b/server/public/shared/request/context.go
index b9027b4ef1..f49e92ebe3 100644
--- a/server/public/shared/request/context.go
+++ b/server/public/shared/request/context.go
@@ -26,12 +26,12 @@ type Context struct {
 	context context.Context
 }
 
-func NewContext(ctx context.Context, requestId, ipAddress, path, userAgent, acceptLanguage string, session model.Session, t i18n.TranslateFunc) *Context {
+func NewContext(ctx context.Context, requestId, ipAddress, xForwardedFor, path, userAgent, acceptLanguage string, t i18n.TranslateFunc) *Context {
 	return &Context{
 		t:              t,
-		session:        session,
 		requestId:      requestId,
 		ipAddress:      ipAddress,
+		xForwardedFor:  xForwardedFor,
 		path:           path,
 		userAgent:      userAgent,
 		acceptLanguage: acceptLanguage,