From 8b969426f0e2b7207e6d20cbc030e4a55c8e4258 Mon Sep 17 00:00:00 2001 From: Nikhil Ranjan Date: Wed, 11 Sep 2019 00:23:49 +0200 Subject: [PATCH 01/53] Converting to structured logging the file jobs/jobs_watcher.go (#12121) --- jobs/jobs_watcher.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/jobs/jobs_watcher.go b/jobs/jobs_watcher.go index 632c4e3bbd..c9b4136758 100644 --- a/jobs/jobs_watcher.go +++ b/jobs/jobs_watcher.go @@ -4,7 +4,6 @@ package jobs import ( - "fmt" "math/rand" "time" @@ -68,7 +67,7 @@ func (watcher *Watcher) Stop() { func (watcher *Watcher) PollAndNotify() { jobs, err := watcher.srv.Store.Job().GetAllByStatus(model.JOB_STATUS_PENDING) if err != nil { - mlog.Error(fmt.Sprintf("Error occurred getting all pending statuses: %v", err.Error())) + mlog.Error("Error occurred getting all pending statuses.", mlog.Err(err)) return } From 814c234443a593b655e009ac757f80553692f5e1 Mon Sep 17 00:00:00 2001 From: Claudio Costa Date: Wed, 11 Sep 2019 10:56:12 +0200 Subject: [PATCH 02/53] [MM-18036] Sanitize sql LIKE terms on search endpoints (#12044) * Sanitize sql LIKE terms on search endpoints * Add search term sanitization in additional places --- store/sqlstore/channel_store.go | 13 ++--------- store/sqlstore/compliance_store.go | 1 + store/sqlstore/emoji_store.go | 2 ++ store/sqlstore/group_store.go | 4 ++-- store/sqlstore/team_store.go | 6 +++++ store/sqlstore/user_access_token_store.go | 1 + store/sqlstore/user_store.go | 23 ++++--------------- store/sqlstore/utils.go | 16 +++++++++++++ store/sqlstore/utils_test.go | 28 +++++++++++++++++++++++ 9 files changed, 62 insertions(+), 32 deletions(-) diff --git a/store/sqlstore/channel_store.go b/store/sqlstore/channel_store.go index ae01300781..0075f4ea00 100644 --- a/store/sqlstore/channel_store.go +++ b/store/sqlstore/channel_store.go @@ -2259,17 +2259,7 @@ func (s SqlChannelStore) SearchMore(userId string, teamId string, term string) ( } func (s SqlChannelStore) buildLIKEClause(term 
string, searchColumns string) (likeClause, likeTerm string) { - likeTerm = term - - // These chars must be removed from the like query. - for _, c := range ignoreLikeSearchChar { - likeTerm = strings.Replace(likeTerm, c, "", -1) - } - - // These chars must be escaped in the like query. - for _, c := range escapeLikeSearchChar { - likeTerm = strings.Replace(likeTerm, c, "*"+c, -1) - } + likeTerm = sanitizeSearchTerm(term, "*") if likeTerm == "" { return @@ -2429,6 +2419,7 @@ func (s SqlChannelStore) getSearchGroupChannelsQuery(userId, term string, isPost for idx, term := range terms { argName := fmt.Sprintf("Term%v", idx) + term = sanitizeSearchTerm(term, "\\") likeClauses = append(likeClauses, fmt.Sprintf(baseLikeClause, ":"+argName)) args[argName] = "%" + term + "%" } diff --git a/store/sqlstore/compliance_store.go b/store/sqlstore/compliance_store.go index bb32f5d3b0..3ae6ef5b47 100644 --- a/store/sqlstore/compliance_store.go +++ b/store/sqlstore/compliance_store.go @@ -90,6 +90,7 @@ func (s SqlComplianceStore) ComplianceExport(job *model.Compliance) ([]*model.Co keywordQuery = "AND (" for index, keyword := range keywords { + keyword = sanitizeSearchTerm(keyword, "\\") if index >= 1 { keywordQuery += " OR LOWER(Posts.Message) LIKE :Keyword" + strconv.Itoa(index) } else { diff --git a/store/sqlstore/emoji_store.go b/store/sqlstore/emoji_store.go index 8489454068..533f88e1f5 100644 --- a/store/sqlstore/emoji_store.go +++ b/store/sqlstore/emoji_store.go @@ -147,6 +147,8 @@ func (es SqlEmojiStore) Delete(emoji *model.Emoji, time int64) *model.AppError { func (es SqlEmojiStore) Search(name string, prefixOnly bool, limit int) ([]*model.Emoji, *model.AppError) { var emojis []*model.Emoji + name = sanitizeSearchTerm(name, "\\") + term := "" if !prefixOnly { term = "%" diff --git a/store/sqlstore/group_store.go b/store/sqlstore/group_store.go index b9f6c6ac5e..84f2139845 100644 --- a/store/sqlstore/group_store.go +++ b/store/sqlstore/group_store.go @@ -853,7 +853,7 @@ 
func (s *SqlGroupStore) groupsBySyncableBaseQuery(st model.GroupSyncableType, t } if len(opts.Q) > 0 { - pattern := fmt.Sprintf("%%%s%%", opts.Q) + pattern := fmt.Sprintf("%%%s%%", sanitizeSearchTerm(opts.Q, "\\")) operatorKeyword := "ILIKE" if s.DriverName() == model.DATABASE_DRIVER_MYSQL { operatorKeyword = "LIKE" @@ -919,7 +919,7 @@ func (s *SqlGroupStore) GetGroups(page, perPage int, opts model.GroupSearchOpts) } if len(opts.Q) > 0 { - pattern := fmt.Sprintf("%%%s%%", opts.Q) + pattern := fmt.Sprintf("%%%s%%", sanitizeSearchTerm(opts.Q, "\\")) operatorKeyword := "ILIKE" if s.DriverName() == model.DATABASE_DRIVER_MYSQL { operatorKeyword = "LIKE" diff --git a/store/sqlstore/team_store.go b/store/sqlstore/team_store.go index ec3b3770f5..0de45d0f4e 100644 --- a/store/sqlstore/team_store.go +++ b/store/sqlstore/team_store.go @@ -293,6 +293,8 @@ func (s SqlTeamStore) GetByName(name string) (*model.Team, *model.AppError) { func (s SqlTeamStore) SearchAll(term string) ([]*model.Team, *model.AppError) { var teams []*model.Team + term = sanitizeSearchTerm(term, "\\") + if _, err := s.GetReplica().Select(&teams, "SELECT * FROM Teams WHERE Name LIKE :Term OR DisplayName LIKE :Term", map[string]interface{}{"Term": term + "%"}); err != nil { return nil, model.NewAppError("SqlTeamStore.SearchAll", "store.sql_team.search_all_team.app_error", nil, "term="+term+", "+err.Error(), http.StatusInternalServerError) } @@ -303,6 +305,8 @@ func (s SqlTeamStore) SearchAll(term string) ([]*model.Team, *model.AppError) { func (s SqlTeamStore) SearchOpen(term string) ([]*model.Team, *model.AppError) { var teams []*model.Team + term = sanitizeSearchTerm(term, "\\") + if _, err := s.GetReplica().Select(&teams, "SELECT * FROM Teams WHERE Type = 'O' AND AllowOpenInvite = true AND (Name LIKE :Term OR DisplayName LIKE :Term)", map[string]interface{}{"Term": term + "%"}); err != nil { return nil, model.NewAppError("SqlTeamStore.SearchOpen", "store.sql_team.search_open_team.app_error", nil, 
"term="+term+", "+err.Error(), http.StatusInternalServerError) } @@ -313,6 +317,8 @@ func (s SqlTeamStore) SearchOpen(term string) ([]*model.Team, *model.AppError) { func (s SqlTeamStore) SearchPrivate(term string) ([]*model.Team, *model.AppError) { var teams []*model.Team + term = sanitizeSearchTerm(term, "\\") + query := `SELECT * FROM diff --git a/store/sqlstore/user_access_token_store.go b/store/sqlstore/user_access_token_store.go index 54b26d6ee0..bd5df11dc0 100644 --- a/store/sqlstore/user_access_token_store.go +++ b/store/sqlstore/user_access_token_store.go @@ -179,6 +179,7 @@ func (s SqlUserAccessTokenStore) GetByUser(userId string, offset, limit int) ([] } func (s SqlUserAccessTokenStore) Search(term string) ([]*model.UserAccessToken, *model.AppError) { + term = sanitizeSearchTerm(term, "\\") tokens := []*model.UserAccessToken{} params := map[string]interface{}{"Term": term + "%"} query := ` diff --git a/store/sqlstore/user_store.go b/store/sqlstore/user_store.go index 2f16172453..5a8fc3fe75 100644 --- a/store/sqlstore/user_store.go +++ b/store/sqlstore/user_store.go @@ -409,11 +409,13 @@ func applyRoleFilter(query sq.SelectBuilder, role string, isPostgreSQL bool) sq. return query } - roleParam := fmt.Sprintf("%%%s%%", role) if isPostgreSQL { + roleParam := fmt.Sprintf("%%%s%%", sanitizeSearchTerm(role, "\\")) return query.Where("u.Roles LIKE LOWER(?)", roleParam) } + roleParam := fmt.Sprintf("%%%s%%", sanitizeSearchTerm(role, "*")) + return query.Where("u.Roles LIKE ? 
ESCAPE '*'", roleParam) } @@ -1222,15 +1224,6 @@ func (us SqlUserStore) SearchInChannel(channelId string, term string, options *m return us.performSearch(query, term, options) } -var escapeLikeSearchChar = []string{ - "%", - "_", -} - -var ignoreLikeSearchChar = []string{ - "*", -} - var spaceFulltextSearchChar = []string{ "<", ">", @@ -1265,15 +1258,7 @@ func generateSearchQuery(query sq.SelectBuilder, terms []string, fields []string } func (us SqlUserStore) performSearch(query sq.SelectBuilder, term string, options *model.UserSearchOptions) ([]*model.User, *model.AppError) { - // These chars must be removed from the like query. - for _, c := range ignoreLikeSearchChar { - term = strings.Replace(term, c, "", -1) - } - - // These chars must be escaped in the like query. - for _, c := range escapeLikeSearchChar { - term = strings.Replace(term, c, "*"+c, -1) - } + term = sanitizeSearchTerm(term, "*") searchType := USER_SEARCH_TYPE_NAMES_NO_FULL_NAME if options.AllowEmails { diff --git a/store/sqlstore/utils.go b/store/sqlstore/utils.go index a5805b86d9..531bdf8d60 100644 --- a/store/sqlstore/utils.go +++ b/store/sqlstore/utils.go @@ -8,11 +8,27 @@ import ( "database/sql" "fmt" "strconv" + "strings" "github.com/mattermost/gorp" "github.com/mattermost/mattermost-server/mlog" ) +var escapeLikeSearchChar = []string{ + "%", + "_", +} + +func sanitizeSearchTerm(term string, escapeChar string) string { + term = strings.Replace(term, escapeChar, "", -1) + + for _, c := range escapeLikeSearchChar { + term = strings.Replace(term, c, escapeChar+c, -1) + } + + return term +} + // Converts a list of strings into a list of query parameters and a named parameter map that can // be used as part of a SQL query. 
func MapStringsToQueryParams(list []string, paramPrefix string) (string, map[string]interface{}) { diff --git a/store/sqlstore/utils_test.go b/store/sqlstore/utils_test.go index 2adf4dc10f..f49c1b4049 100644 --- a/store/sqlstore/utils_test.go +++ b/store/sqlstore/utils_test.go @@ -2,6 +2,8 @@ package sqlstore import ( "testing" + + "github.com/stretchr/testify/require" ) func TestMapStringsToQueryParams(t *testing.T) { @@ -30,3 +32,29 @@ func TestMapStringsToQueryParams(t *testing.T) { } }) } + +func TestSanitizeSearchTerm(t *testing.T) { + term := "test" + result := sanitizeSearchTerm(term, "\\") + require.Equal(t, result, term) + + term = "%%%" + expected := "\\%\\%\\%" + result = sanitizeSearchTerm(term, "\\") + require.Equal(t, result, expected) + + term = "%\\%\\%" + expected = "\\%\\%\\%" + result = sanitizeSearchTerm(term, "\\") + require.Equal(t, result, expected) + + term = "%_test_%" + expected = "\\%\\_test\\_\\%" + result = sanitizeSearchTerm(term, "\\") + require.Equal(t, result, expected) + + term = "**test_%" + expected = "test*_*%" + result = sanitizeSearchTerm(term, "*") + require.Equal(t, result, expected) +} From 67f86b5a638ba8db9fc054c68932990c235eeb4e Mon Sep 17 00:00:00 2001 From: Nikhil Ranjan Date: Wed, 11 Sep 2019 13:06:52 +0200 Subject: [PATCH 03/53] Converting to structured logging the file api4/system.go (#12119) --- api4/system.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/api4/system.go b/api4/system.go index aa587b352c..878d407d63 100644 --- a/api4/system.go +++ b/api4/system.go @@ -56,7 +56,7 @@ func getSystemPing(c *Context, w http.ResponseWriter, r *http.Request) { actualGoroutines := runtime.NumGoroutine() if *c.App.Config().ServiceSettings.GoroutineHealthThreshold > 0 && actualGoroutines >= *c.App.Config().ServiceSettings.GoroutineHealthThreshold { - mlog.Warn(fmt.Sprintf("The number of running goroutines (%v) is over the health threshold (%v)", actualGoroutines, 
*c.App.Config().ServiceSettings.GoroutineHealthThreshold)) + mlog.Warn("The number of running goroutines is over the health threshold", mlog.Int("goroutines", actualGoroutines), mlog.Int("health_threshold", *c.App.Config().ServiceSettings.GoroutineHealthThreshold)) s[model.STATUS] = model.STATUS_UNHEALTHY } @@ -76,17 +76,17 @@ func getSystemPing(c *Context, w http.ResponseWriter, r *http.Request) { Value: currentTime, }) if writeErr != nil { - mlog.Debug(fmt.Sprintf("Unable to write to database: %s", writeErr.Error())) + mlog.Debug("Unable to write to database.", mlog.Err(writeErr)) s[dbStatusKey] = model.STATUS_UNHEALTHY s[model.STATUS] = model.STATUS_UNHEALTHY } else { healthCheck, readErr := c.App.Srv.Store.System().GetByName(healthCheckKey) if readErr != nil { - mlog.Debug(fmt.Sprintf("Unable to read from database: %s", readErr.Error())) + mlog.Debug("Unable to read from database.", mlog.Err(readErr)) s[dbStatusKey] = model.STATUS_UNHEALTHY s[model.STATUS] = model.STATUS_UNHEALTHY } else if healthCheck.Value != currentTime { - mlog.Debug(fmt.Sprintf("Incorrect healthcheck value, expected %s, got %s", currentTime, healthCheck.Value)) + mlog.Debug("Incorrect healthcheck value", mlog.String("expected", currentTime), mlog.String("got", healthCheck.Value)) s[dbStatusKey] = model.STATUS_UNHEALTHY s[model.STATUS] = model.STATUS_UNHEALTHY } else { @@ -105,7 +105,7 @@ func getSystemPing(c *Context, w http.ResponseWriter, r *http.Request) { s[model.STATUS] = model.STATUS_UNHEALTHY } } else { - mlog.Debug(fmt.Sprintf("Unable to get filestore for ping status: %s", appErr.Error())) + mlog.Debug("Unable to get filestore for ping status.", mlog.Err(appErr)) s[filestoreStatusKey] = model.STATUS_UNHEALTHY s[model.STATUS] = model.STATUS_UNHEALTHY } @@ -269,7 +269,7 @@ func postLog(c *Context, w http.ResponseWriter, r *http.Request) { err.Where = "client" c.LogError(err) } else { - mlog.Debug(fmt.Sprint(msg)) + mlog.Debug("message", mlog.String("message", msg)) } m["message"] = 
msg From bf847ba184623d9518280dba4056a0c6ac770633 Mon Sep 17 00:00:00 2001 From: Hector Date: Wed, 11 Sep 2019 13:33:07 +0200 Subject: [PATCH 04/53] =?UTF-8?q?MM-18290=20Using=20structured=20logging?= =?UTF-8?q?=20in=20file=20"security=5Fupdate=5Fche=E2=80=A6=20(#12123)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Refactor to use structured logging * Properly formatted with gofmt --- app/security_update_check.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/app/security_update_check.go b/app/security_update_check.go index 2e3a1b2a6d..e144e89277 100644 --- a/app/security_update_check.go +++ b/app/security_update_check.go @@ -4,7 +4,6 @@ package app import ( - "fmt" "io/ioutil" "net/http" "net/url" @@ -114,7 +113,7 @@ func (s *Server) DoSecurityUpdateCheck() { } for _, user := range users { - mlog.Info(fmt.Sprintf("Sending security bulletin for %v to %v", bulletin.Id, user.Email)) + mlog.Info("Sending security bulletin", mlog.String("bulletin_id", bulletin.Id), mlog.String("user_email", user.Email)) license := s.License() mailservice.SendMailUsingConfig(user.Email, utils.T("mattermost.bulletin.subject"), string(body), s.Config(), license != nil && *license.Features.Compliance) } From 30571165815387cc29cf626cdb9fa44e950f1d46 Mon Sep 17 00:00:00 2001 From: Nikhil Ranjan Date: Wed, 11 Sep 2019 13:33:31 +0200 Subject: [PATCH 05/53] Converting to structured logging the file api4/websocket.go (#12118) --- api4/websocket.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/api4/websocket.go b/api4/websocket.go index 6592365088..164ae83b79 100644 --- a/api4/websocket.go +++ b/api4/websocket.go @@ -4,7 +4,6 @@ package api4 import ( - "fmt" "net/http" "github.com/gorilla/websocket" @@ -25,7 +24,7 @@ func connectWebSocket(c *Context, w http.ResponseWriter, r *http.Request) { ws, err := upgrader.Upgrade(w, r, nil) if err != nil { - mlog.Error(fmt.Sprintf("websocket connect err: %v", err)) + 
mlog.Error("websocket connect err.", mlog.Err(err)) c.Err = model.NewAppError("connect", "api.web_socket.connect.upgrade.app_error", nil, "", http.StatusInternalServerError) return } From 1076994773bd4a49435c59031cd25c15092260e7 Mon Sep 17 00:00:00 2001 From: Arjit Chaudhary Date: Wed, 11 Sep 2019 17:03:47 +0530 Subject: [PATCH 06/53] =?UTF-8?q?Refactor=20"store/sqlstore/channel=5Fmemb?= =?UTF-8?q?er=5Fhistory=5Fstore.go"=20to=20u=E2=80=A6=20(#12059)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- store/sqlstore/channel_member_history_store.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/store/sqlstore/channel_member_history_store.go b/store/sqlstore/channel_member_history_store.go index 6d96634cbc..ae2c319553 100644 --- a/store/sqlstore/channel_member_history_store.go +++ b/store/sqlstore/channel_member_history_store.go @@ -4,10 +4,8 @@ package sqlstore import ( - "fmt" - "net/http" - "database/sql" + "net/http" "github.com/mattermost/mattermost-server/mlog" "github.com/mattermost/mattermost-server/model" @@ -62,7 +60,7 @@ func (s SqlChannelMemberHistoryStore) LogLeaveEvent(userId string, channelId str if rows, err := sqlResult.RowsAffected(); err == nil && rows != 1 { // there was no join event to update - this is best effort, so no need to raise an error - mlog.Warn(fmt.Sprintf("Channel join event for user %v and channel %v not found", userId, channelId), mlog.String("user_id", userId)) + mlog.Warn("Channel join event for user and channel not found", mlog.String("user", userId), mlog.String("channel", channelId)) } return nil } From 4677b50811bc8b50e2f9178f7c23c79526d13b78 Mon Sep 17 00:00:00 2001 From: Lev <1187448+levb@users.noreply.github.com> Date: Wed, 11 Sep 2019 07:34:51 -0700 Subject: [PATCH 07/53] Updated mattermost-plugin-jira to v2.1.2 to fix a failed build (#12151) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 
2dc873582c..165bcd9d8b 100644 --- a/Makefile +++ b/Makefile @@ -87,7 +87,7 @@ PLUGIN_PACKAGES += mattermost-plugin-github-v0.10.2 PLUGIN_PACKAGES += mattermost-plugin-welcomebot-v1.1.0 PLUGIN_PACKAGES += mattermost-plugin-aws-SNS-v1.0.2 PLUGIN_PACKAGES += mattermost-plugin-antivirus-v0.1.1 -PLUGIN_PACKAGES += mattermost-plugin-jira-v2.1.1 +PLUGIN_PACKAGES += mattermost-plugin-jira-v2.1.2 PLUGIN_PACKAGES += mattermost-plugin-gitlab-v1.0.0 PLUGIN_PACKAGES += mattermost-plugin-jenkins-v1.0.0 From df85bd422ffd3aa3dc1d38b956a0bef5648040e4 Mon Sep 17 00:00:00 2001 From: Nikhil Ranjan Date: Wed, 11 Sep 2019 16:52:12 +0200 Subject: [PATCH 08/53] Converting to structured logging the file app/notification.go (#12140) --- app/notification.go | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/app/notification.go b/app/notification.go index 375fcac4dd..1dc1fdbd95 100644 --- a/app/notification.go +++ b/app/notification.go @@ -4,7 +4,6 @@ package app import ( - "fmt" "sort" "strings" "unicode" @@ -175,14 +174,14 @@ func (a *App) SendNotifications(post *model.Post, team *model.Team, channel *mod // Remove the user as recipient when the user has muted the channel. if channelMuted, ok := channelMemberNotifyPropsMap[id][model.MARK_UNREAD_NOTIFY_PROP]; ok { if channelMuted == model.CHANNEL_MARK_UNREAD_MENTION { - mlog.Debug(fmt.Sprintf("Channel muted for user_id %v, channel_mute %v", id, channelMuted)) + mlog.Debug("Channel muted for user", mlog.String("user_id", id), mlog.String("channel_mute", channelMuted)) userAllowsEmails = false } } //If email verification is required and user email is not verified don't send email. if *a.Config().EmailSettings.RequireEmailVerification && !profileMap[id].EmailVerified { - mlog.Error(fmt.Sprintf("Skipped sending notification email to %v, address not verified. 
[details: user_id=%v]", profileMap[id].Email, id)) + mlog.Error("Skipped sending notification email, address not verified.", mlog.String("user_email", profileMap[id].Email), mlog.String("user_id", id)) continue } @@ -250,7 +249,12 @@ func (a *App) SendNotifications(post *model.Post, team *model.Team, channel *mod // MUST be completed before push notifications send for _, umc := range updateMentionChans { if err := <-umc; err != nil { - mlog.Warn(fmt.Sprintf("Failed to update mention count, post_id=%v channel_id=%v err=%v", post.Id, post.ChannelId, result.Err), mlog.String("post_id", post.Id)) + mlog.Warn( + "Failed to update mention count", + mlog.String("post_id", post.Id), + mlog.String("channel_id", post.ChannelId), + mlog.Err(err), + ) } } @@ -352,7 +356,7 @@ func (a *App) SendNotifications(post *model.Post, team *model.Team, channel *mod var infos []*model.FileInfo if result := <-fchan; result.Err != nil { - mlog.Warn(fmt.Sprint("Unable to get fileInfo for push notifications.", post.Id, result.Err), mlog.String("post_id", post.Id)) + mlog.Warn("Unable to get fileInfo for push notifications.", mlog.String("post_id", post.Id), mlog.Err(result.Err)) } else { infos = result.Data.([]*model.FileInfo) } From a5f04802e8e860f32fc4f063ca1658410add4bd2 Mon Sep 17 00:00:00 2001 From: Nikhil Ranjan Date: Wed, 11 Sep 2019 17:16:33 +0200 Subject: [PATCH 09/53] =?UTF-8?q?MM-18266=20Converting=20to=20structured?= =?UTF-8?q?=20logging=20the=20file=20app/web=5Fcon=E2=80=A6=20(#12141)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/web_conn.go | 44 ++++++++++++++++++++++++++++++-------------- 1 file changed, 30 insertions(+), 14 deletions(-) diff --git a/app/web_conn.go b/app/web_conn.go index bec10c8c6e..bc014371ca 100644 --- a/app/web_conn.go +++ b/app/web_conn.go @@ -144,9 +144,9 @@ func (c *WebConn) readPump() { if err := c.WebSocket.ReadJSON(&req); err != nil { // browsers will appear as CloseNoStatusReceived if 
websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseNoStatusReceived) { - mlog.Debug(fmt.Sprintf("websocket.read: client side closed socket userId=%v", c.UserId)) + mlog.Debug("websocket.read: client side closed socket", mlog.String("user_id", c.UserId)) } else { - mlog.Debug(fmt.Sprintf("websocket.read: closing websocket for userId=%v error=%v", c.UserId, err.Error())) + mlog.Debug("websocket.read: closing websocket", mlog.String("user_id", c.UserId), mlog.Err(err)) } return } @@ -181,7 +181,12 @@ func (c *WebConn) writePump() { if msg.EventType() == model.WEBSOCKET_EVENT_TYPING || msg.EventType() == model.WEBSOCKET_EVENT_STATUS_CHANGE || msg.EventType() == model.WEBSOCKET_EVENT_CHANNEL_VIEWED { - mlog.Info(fmt.Sprintf("websocket.slow: dropping message userId=%v type=%v channelId=%v", c.UserId, msg.EventType(), evt.Broadcast.ChannelId)) + mlog.Info( + "websocket.slow: dropping message", + mlog.String("user_id", c.UserId), + mlog.String("type", msg.EventType()), + mlog.String("channel_id", evt.Broadcast.ChannelId), + ) skipSend = true } } @@ -200,9 +205,20 @@ func (c *WebConn) writePump() { if len(c.Send) >= SEND_DEADLOCK_WARN { if evtOk { - mlog.Warn(fmt.Sprintf("websocket.full: message userId=%v type=%v channelId=%v size=%v", c.UserId, msg.EventType(), evt.Broadcast.ChannelId, len(msg.ToJson()))) + mlog.Warn( + "websocket.full", + mlog.String("user_id", c.UserId), + mlog.String("type", msg.EventType()), + mlog.String("channel_id", evt.Broadcast.ChannelId), + mlog.Int("size", len(msg.ToJson())), + ) } else { - mlog.Warn(fmt.Sprintf("websocket.full: message userId=%v type=%v size=%v", c.UserId, msg.EventType(), len(msg.ToJson()))) + mlog.Warn( + "websocket.full", + mlog.String("user_id", c.UserId), + mlog.String("type", msg.EventType()), + mlog.Int("size", len(msg.ToJson())), + ) } } @@ -210,9 +226,9 @@ func (c *WebConn) writePump() { if err := c.WebSocket.WriteMessage(websocket.TextMessage, msgBytes); err != nil { // browsers will appear as 
CloseNoStatusReceived if websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseNoStatusReceived) { - mlog.Debug(fmt.Sprintf("websocket.send: client side closed socket userId=%v", c.UserId)) + mlog.Debug("websocket.send: client side closed socket", mlog.String("user_id", c.UserId)) } else { - mlog.Debug(fmt.Sprintf("websocket.send: closing websocket for userId=%v, error=%v", c.UserId, err.Error())) + mlog.Debug("websocket.send: closing websocket", mlog.String("user_id", c.UserId), mlog.Err(err)) } return } @@ -229,9 +245,9 @@ func (c *WebConn) writePump() { if err := c.WebSocket.WriteMessage(websocket.PingMessage, []byte{}); err != nil { // browsers will appear as CloseNoStatusReceived if websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseNoStatusReceived) { - mlog.Debug(fmt.Sprintf("websocket.ticker: client side closed socket userId=%v", c.UserId)) + mlog.Debug("websocket.ticker: client side closed socket", mlog.String("user_id", c.UserId)) } else { - mlog.Debug(fmt.Sprintf("websocket.ticker: closing websocket for userId=%v error=%v", c.UserId, err.Error())) + mlog.Debug("websocket.ticker: closing websocket", mlog.String("user_id", c.UserId), mlog.Err(err)) } return } @@ -241,7 +257,7 @@ func (c *WebConn) writePump() { case <-authTicker.C: if c.GetSessionToken() == "" { - mlog.Debug(fmt.Sprintf("websocket.authTicker: did not authenticate ip=%v", c.WebSocket.RemoteAddr())) + mlog.Debug("websocket.authTicker: did not authenticate", mlog.Any("ip_address", c.WebSocket.RemoteAddr())) return } authTicker.Stop() @@ -265,7 +281,7 @@ func (webCon *WebConn) IsAuthenticated() bool { session, err := webCon.App.GetSession(webCon.GetSessionToken()) if err != nil { - mlog.Error(fmt.Sprintf("Invalid session err=%v", err.Error())) + mlog.Error("Invalid session.", mlog.Err(err)) webCon.SetSessionToken("") webCon.SetSession(nil) webCon.SetSessionExpiresAt(0) @@ -338,7 +354,7 @@ func (webCon *WebConn) ShouldSendEvent(msg *model.WebSocketEvent) 
bool { if webCon.AllChannelMembers == nil { result, err := webCon.App.Srv.Store.Channel().GetAllChannelMembersForUser(webCon.UserId, true, false) if err != nil { - mlog.Error("webhub.shouldSendEvent: " + err.Error()) + mlog.Error("webhub.shouldSendEvent.", mlog.Err(err)) return false } webCon.AllChannelMembers = result @@ -359,7 +375,7 @@ func (webCon *WebConn) ShouldSendEvent(msg *model.WebSocketEvent) bool { if msg.Event == model.WEBSOCKET_EVENT_USER_UPDATED && webCon.GetSession().Props[model.SESSION_PROP_IS_GUEST] == "true" { canSee, err := webCon.App.UserCanSeeOtherUser(webCon.UserId, msg.Data["user"].(*model.User).Id) if err != nil { - mlog.Error("webhub.shouldSendEvent: " + err.Error()) + mlog.Error("webhub.shouldSendEvent.", mlog.Err(err)) return false } return canSee @@ -374,7 +390,7 @@ func (webCon *WebConn) IsMemberOfTeam(teamId string) bool { if currentSession == nil || len(currentSession.Token) == 0 { session, err := webCon.App.GetSession(webCon.GetSessionToken()) if err != nil { - mlog.Error(fmt.Sprintf("Invalid session err=%v", err.Error())) + mlog.Error("Invalid session.", mlog.Err(err)) return false } webCon.SetSession(session) From 98489b9e67d97488f4c04732e8eb18e7ccd3d75a Mon Sep 17 00:00:00 2001 From: Theo Gkourasas Date: Wed, 11 Sep 2019 11:31:51 -0400 Subject: [PATCH 10/53] =?UTF-8?q?MM-15219=20Add=20support=20for=20introduc?= =?UTF-8?q?tion=20text=20in=20interactive=20dial=E2=80=A6=20(#12029)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- api4/integration_action_test.go | 14 +++++++++++++- model/integration_action.go | 15 ++++++++------- 2 files changed, 21 insertions(+), 8 deletions(-) diff --git a/api4/integration_action_test.go b/api4/integration_action_test.go index a28ab51fb7..3838d321fb 100644 --- a/api4/integration_action_test.go +++ b/api4/integration_action_test.go @@ -142,8 +142,20 @@ func TestOpenDialog(t *testing.T) { CheckBadRequestStatus(t, resp) assert.False(t, pass) - // Should 
pass with no elements + // Should pass with markdown formatted introduction text request.URL = "http://localhost:8065" + request.Dialog.IntroductionText = "**Some** _introduction text" + pass, resp = Client.OpenInteractiveDialog(request) + CheckNoError(t, resp) + assert.True(t, pass) + + // Should pass with empty introduction text + request.Dialog.IntroductionText = "" + pass, resp = Client.OpenInteractiveDialog(request) + CheckNoError(t, resp) + assert.True(t, pass) + + // Should pass with no elements request.Dialog.Elements = nil pass, resp = Client.OpenInteractiveDialog(request) CheckNoError(t, resp) diff --git a/model/integration_action.go b/model/integration_action.go index 4490e4fde0..82e402b531 100644 --- a/model/integration_action.go +++ b/model/integration_action.go @@ -178,13 +178,14 @@ type PostActionAPIResponse struct { } type Dialog struct { - CallbackId string `json:"callback_id"` - Title string `json:"title"` - IconURL string `json:"icon_url"` - Elements []DialogElement `json:"elements"` - SubmitLabel string `json:"submit_label"` - NotifyOnCancel bool `json:"notify_on_cancel"` - State string `json:"state"` + CallbackId string `json:"callback_id"` + Title string `json:"title"` + IntroductionText string `json:"introduction_text"` + IconURL string `json:"icon_url"` + Elements []DialogElement `json:"elements"` + SubmitLabel string `json:"submit_label"` + NotifyOnCancel bool `json:"notify_on_cancel"` + State string `json:"state"` } type DialogElement struct { From 9c3fc4ab9f30fdc8669c5f646f8e09dfe140e4f8 Mon Sep 17 00:00:00 2001 From: Paulo Bittencourt Date: Wed, 11 Sep 2019 11:35:51 -0400 Subject: [PATCH 11/53] [MM-15220] Add generic error field to SubmitDialogResponse (#12081) This will allow integrations to return an generic error message that is not tied to a specific dialog field. 
--- app/integration_action_test.go | 2 ++ model/integration_action.go | 1 + model/integration_action_test.go | 1 + 3 files changed, 4 insertions(+) diff --git a/app/integration_action_test.go b/app/integration_action_test.go index d7eaca982a..d204efcd23 100644 --- a/app/integration_action_test.go +++ b/app/integration_action_test.go @@ -430,6 +430,7 @@ func TestSubmitInteractiveDialog(t *testing.T) { assert.Equal(t, "value1", val) resp := model.SubmitDialogResponse{ + Error: "some generic error", Errors: map[string]string{"name1": "some error"}, } @@ -444,6 +445,7 @@ func TestSubmitInteractiveDialog(t *testing.T) { resp, err := th.App.SubmitInteractiveDialog(submit) assert.Nil(t, err) require.NotNil(t, resp) + assert.Equal(t, "some generic error", resp.Error) assert.Equal(t, "some error", resp.Errors["name1"]) submit.URL = "" diff --git a/model/integration_action.go b/model/integration_action.go index 82e402b531..f57611f6d6 100644 --- a/model/integration_action.go +++ b/model/integration_action.go @@ -222,6 +222,7 @@ type SubmitDialogRequest struct { } type SubmitDialogResponse struct { + Error string `json:"error,omitempty"` Errors map[string]string `json:"errors,omitempty"` } diff --git a/model/integration_action_test.go b/model/integration_action_test.go index 80c582b2ef..2c7251af09 100644 --- a/model/integration_action_test.go +++ b/model/integration_action_test.go @@ -141,6 +141,7 @@ func TestSubmitDialogRequestToJson(t *testing.T) { func TestSubmitDialogResponseToJson(t *testing.T) { t.Run("all fine", func(t *testing.T) { request := SubmitDialogResponse{ + Error: "some generic error", Errors: map[string]string{ "text": "some text", "float": "1.2", From be25dcd6f3e13d6d281f5c5135f8c3f43ceccf45 Mon Sep 17 00:00:00 2001 From: Nikhil Ranjan Date: Wed, 11 Sep 2019 20:19:13 +0200 Subject: [PATCH 12/53] Converting to structured logging the file jobs/jobs.go (#12120) --- jobs/jobs.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/jobs/jobs.go 
b/jobs/jobs.go index 8c97a450fc..d596bd044e 100644 --- a/jobs/jobs.go +++ b/jobs/jobs.go @@ -5,7 +5,6 @@ package jobs import ( "context" - "fmt" "time" "net/http" @@ -135,10 +134,10 @@ func (srv *JobServer) CancellationWatcher(ctx context.Context, jobId string, can for { select { case <-ctx.Done(): - mlog.Debug(fmt.Sprintf("CancellationWatcher for Job: %v Aborting as job has finished.", jobId)) + mlog.Debug("CancellationWatcher for Job Aborting as job has finished.", mlog.String("job_id", jobId)) return case <-time.After(CANCEL_WATCHER_POLLING_INTERVAL * time.Millisecond): - mlog.Debug(fmt.Sprintf("CancellationWatcher for Job: %v polling.", jobId)) + mlog.Debug("CancellationWatcher for Job started polling.", mlog.String("job_id", jobId)) if jobStatus, err := srv.Store.Job().Get(jobId); err == nil { if jobStatus.Status == model.JOB_STATUS_CANCEL_REQUESTED { close(cancelChan) From 36189470949d8d54bf90ee3a6abf4358e50bcd28 Mon Sep 17 00:00:00 2001 From: Elias Nahum Date: Wed, 11 Sep 2019 17:30:35 -0300 Subject: [PATCH 13/53] MM-18066 Remove double @ for email notifications (#12131) * Remove double @ for email notifications * Fix tests --- app/notification_email_test.go | 32 ++++++++++++++++---------------- i18n/en.json | 12 ++++++------ 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/app/notification_email_test.go b/app/notification_email_test.go index 7dcf1ff874..b7d1b0a133 100644 --- a/app/notification_email_test.go +++ b/app/notification_email_test.go @@ -109,8 +109,8 @@ func TestGetNotificationEmailBodyFullNotificationPublicChannel(t *testing.T) { if !strings.Contains(body, "Channel: "+channel.DisplayName) { t.Fatal("Expected email text 'Channel: " + channel.DisplayName + "'. Got " + body) } - if !strings.Contains(body, "@"+senderName+" - ") { - t.Fatal("Expected email text '@" + senderName + " - '. Got " + body) + if !strings.Contains(body, senderName+" - ") { + t.Fatal("Expected email text '" + senderName + " - '. 
Got " + body) } if !strings.Contains(body, post.Message) { t.Fatal("Expected email text '" + post.Message + "'. Got " + body) @@ -146,8 +146,8 @@ func TestGetNotificationEmailBodyFullNotificationGroupChannel(t *testing.T) { if !strings.Contains(body, "Channel: ChannelName") { t.Fatal("Expected email text 'Channel: ChannelName'. Got " + body) } - if !strings.Contains(body, "@"+senderName+" - ") { - t.Fatal("Expected email text '@" + senderName + " - '. Got " + body) + if !strings.Contains(body, senderName+" - ") { + t.Fatal("Expected email text '" + senderName + " - '. Got " + body) } if !strings.Contains(body, post.Message) { t.Fatal("Expected email text '" + post.Message + "'. Got " + body) @@ -183,8 +183,8 @@ func TestGetNotificationEmailBodyFullNotificationPrivateChannel(t *testing.T) { if !strings.Contains(body, "Channel: "+channel.DisplayName) { t.Fatal("Expected email text 'Channel: " + channel.DisplayName + "'. Got " + body) } - if !strings.Contains(body, "@"+senderName+" - ") { - t.Fatal("Expected email text '@" + senderName + " - '. Got " + body) + if !strings.Contains(body, senderName+" - ") { + t.Fatal("Expected email text '" + senderName + " - '. Got " + body) } if !strings.Contains(body, post.Message) { t.Fatal("Expected email text '" + post.Message + "'. Got " + body) @@ -217,8 +217,8 @@ func TestGetNotificationEmailBodyFullNotificationDirectChannel(t *testing.T) { if !strings.Contains(body, "You have a new Direct Message.") { t.Fatal("Expected email text 'You have a new Direct Message. Got " + body) } - if !strings.Contains(body, "@"+senderName+" - ") { - t.Fatal("Expected email text '@" + senderName + " - '. Got " + body) + if !strings.Contains(body, senderName+" - ") { + t.Fatal("Expected email text '" + senderName + " - '. Got " + body) } if !strings.Contains(body, post.Message) { t.Fatal("Expected email text '" + post.Message + "'. 
Got " + body) @@ -386,8 +386,8 @@ func TestGetNotificationEmailBodyGenericNotificationPublicChannel(t *testing.T) translateFunc := utils.GetUserTranslations("en") body := th.App.getNotificationEmailBody(recipient, post, channel, channelName, senderName, teamName, teamURL, emailNotificationContentsType, true, translateFunc) - if !strings.Contains(body, "You have a new notification from @"+senderName) { - t.Fatal("Expected email text 'You have a new notification from @" + senderName + "'. Got " + body) + if !strings.Contains(body, "You have a new notification from "+senderName) { + t.Fatal("Expected email text 'You have a new notification from " + senderName + "'. Got " + body) } if strings.Contains(body, "Channel: "+channel.DisplayName) { t.Fatal("Did not expect email text 'Channel: " + channel.DisplayName + "'. Got " + body) @@ -420,8 +420,8 @@ func TestGetNotificationEmailBodyGenericNotificationGroupChannel(t *testing.T) { translateFunc := utils.GetUserTranslations("en") body := th.App.getNotificationEmailBody(recipient, post, channel, channelName, senderName, teamName, teamURL, emailNotificationContentsType, true, translateFunc) - if !strings.Contains(body, "You have a new Group Message from @"+senderName) { - t.Fatal("Expected email text 'You have a new Group Message from @" + senderName + "'. Got " + body) + if !strings.Contains(body, "You have a new Group Message from "+senderName) { + t.Fatal("Expected email text 'You have a new Group Message from " + senderName + "'. Got " + body) } if strings.Contains(body, "CHANNEL: "+channel.DisplayName) { t.Fatal("Did not expect email text 'CHANNEL: " + channel.DisplayName + "'. 
Got " + body) @@ -454,8 +454,8 @@ func TestGetNotificationEmailBodyGenericNotificationPrivateChannel(t *testing.T) translateFunc := utils.GetUserTranslations("en") body := th.App.getNotificationEmailBody(recipient, post, channel, channelName, senderName, teamName, teamURL, emailNotificationContentsType, true, translateFunc) - if !strings.Contains(body, "You have a new notification from @"+senderName) { - t.Fatal("Expected email text 'You have a new notification from @" + senderName + "'. Got " + body) + if !strings.Contains(body, "You have a new notification from "+senderName) { + t.Fatal("Expected email text 'You have a new notification from " + senderName + "'. Got " + body) } if strings.Contains(body, "CHANNEL: "+channel.DisplayName) { t.Fatal("Did not expect email text 'CHANNEL: " + channel.DisplayName + "'. Got " + body) @@ -488,8 +488,8 @@ func TestGetNotificationEmailBodyGenericNotificationDirectChannel(t *testing.T) translateFunc := utils.GetUserTranslations("en") body := th.App.getNotificationEmailBody(recipient, post, channel, channelName, senderName, teamName, teamURL, emailNotificationContentsType, true, translateFunc) - if !strings.Contains(body, "You have a new Direct Message from @"+senderName) { - t.Fatal("Expected email text 'You have a new Direct Message from @" + senderName + "'. Got " + body) + if !strings.Contains(body, "You have a new Direct Message from "+senderName) { + t.Fatal("Expected email text 'You have a new Direct Message from " + senderName + "'. Got " + body) } if strings.Contains(body, "CHANNEL: "+channel.DisplayName) { t.Fatal("Did not expect email text 'CHANNEL: " + channel.DisplayName + "'. 
Got " + body) diff --git a/i18n/en.json b/i18n/en.json index 1901d6bcb6..7a417a6d99 100644 --- a/i18n/en.json +++ b/i18n/en.json @@ -3376,7 +3376,7 @@ }, { "id": "app.notification.body.intro.direct.generic", - "translation": "You have a new Direct Message from @{{.SenderName}}" + "translation": "You have a new Direct Message from {{.SenderName}}" }, { "id": "app.notification.body.intro.group_message.full", @@ -3384,7 +3384,7 @@ }, { "id": "app.notification.body.intro.group_message.generic", - "translation": "You have a new Group Message from @{{.SenderName}}" + "translation": "You have a new Group Message from {{.SenderName}}" }, { "id": "app.notification.body.intro.notification.full", @@ -3392,11 +3392,11 @@ }, { "id": "app.notification.body.intro.notification.generic", - "translation": "You have a new notification from @{{.SenderName}}" + "translation": "You have a new notification from {{.SenderName}}" }, { "id": "app.notification.body.text.direct.full", - "translation": "@{{.SenderName}} - {{.Hour}}:{{.Minute}} {{.TimeZone}}, {{.Month}} {{.Day}}" + "translation": "{{.SenderName}} - {{.Hour}}:{{.Minute}} {{.TimeZone}}, {{.Month}} {{.Day}}" }, { "id": "app.notification.body.text.direct.generic", @@ -3408,7 +3408,7 @@ }, { "id": "app.notification.body.text.group_message.full2", - "translation": "@{{.SenderName}} - {{.Hour}}:{{.Minute}} {{.TimeZone}}, {{.Month}} {{.Day}}" + "translation": "{{.SenderName}} - {{.Hour}}:{{.Minute}} {{.TimeZone}}, {{.Month}} {{.Day}}" }, { "id": "app.notification.body.text.group_message.generic", @@ -3420,7 +3420,7 @@ }, { "id": "app.notification.body.text.notification.full2", - "translation": "@{{.SenderName}} - {{.Hour}}:{{.Minute}} {{.TimeZone}}, {{.Month}} {{.Day}}" + "translation": "{{.SenderName}} - {{.Hour}}:{{.Minute}} {{.TimeZone}}, {{.Month}} {{.Day}}" }, { "id": "app.notification.body.text.notification.generic", From d327df12e68e2ad596d266811b5ebee33eb7bd17 Mon Sep 17 00:00:00 2001 From: Nikhil Ranjan Date: Thu, 12 Sep 2019 
05:49:27 +0200 Subject: [PATCH 14/53] MM-18264 Converting to structured logging the file app/emoji.go (#12139) --- app/emoji.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/app/emoji.go b/app/emoji.go index e1a0ecf83b..42222f1b1d 100644 --- a/app/emoji.go +++ b/app/emoji.go @@ -5,7 +5,6 @@ package app import ( "bytes" - "fmt" "image" "image/draw" "image/gif" @@ -285,13 +284,12 @@ func imageToPaletted(img image.Image) *image.Paletted { func (a *App) deleteEmojiImage(id string) { if err := a.MoveFile(getEmojiImagePath(id), "emoji/"+id+"/image_deleted"); err != nil { - mlog.Error(fmt.Sprintf("Failed to rename image when deleting emoji %v", id)) + mlog.Error("Failed to rename image when deleting emoji", mlog.String("emoji_id", id)) } } func (a *App) deleteReactionsForEmoji(emojiName string) { if err := a.Srv.Store.Reaction().DeleteAllWithEmojiName(emojiName); err != nil { - mlog.Warn(fmt.Sprintf("Unable to delete reactions when deleting emoji with emoji name %v", emojiName)) - mlog.Warn(fmt.Sprint(err)) + mlog.Warn("Unable to delete reactions when deleting emoji", mlog.String("emoji_name", emojiName), mlog.Err(err)) } } From 37d719d20cf8f5f117da2a18bb3a53fe0c90eb80 Mon Sep 17 00:00:00 2001 From: Nikhil Ranjan Date: Thu, 12 Sep 2019 13:35:07 +0200 Subject: [PATCH 15/53] Converting to structured logging the file app/ratelimit.go (#12176) --- app/ratelimit.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/app/ratelimit.go b/app/ratelimit.go index 0a7715f25b..8c560cbaaf 100644 --- a/app/ratelimit.go +++ b/app/ratelimit.go @@ -4,7 +4,6 @@ package app import ( - "fmt" "math" "net/http" "strconv" @@ -77,14 +76,14 @@ func (rl *RateLimiter) GenerateKey(r *http.Request) string { func (rl *RateLimiter) RateLimitWriter(key string, w http.ResponseWriter) bool { limited, context, err := rl.throttledRateLimiter.RateLimit(key, 1) if err != nil { - mlog.Critical("Internal server error when rate limiting. Rate Limiting broken. 
Error:" + err.Error()) + mlog.Critical("Internal server error when rate limiting. Rate Limiting broken.", mlog.Err(err)) return false } setRateLimitHeaders(w, context) if limited { - mlog.Error(fmt.Sprintf("Denied due to throttling settings code=429 key=%v", key)) + mlog.Error("Denied due to throttling settings code=429", mlog.String("key", key)) http.Error(w, "limit exceeded", 429) } From 54c0e394f5d04e3a95dec9af2de10a6861f439ac Mon Sep 17 00:00:00 2001 From: Nikhil Ranjan Date: Thu, 12 Sep 2019 15:31:09 +0200 Subject: [PATCH 16/53] =?UTF-8?q?MM-18278=20Converting=20to=20structured?= =?UTF-8?q?=20logging=20the=20file=20plugin/supe=E2=80=A6=20(#12177)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- plugin/supervisor.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/supervisor.go b/plugin/supervisor.go index d07ece8b1c..820b6bee8b 100644 --- a/plugin/supervisor.go +++ b/plugin/supervisor.go @@ -118,7 +118,7 @@ func (sup *supervisor) PerformHealthCheck() error { } } if pingErr != nil { - mlog.Debug(fmt.Sprintf("Error pinging plugin, error: %s", pingErr.Error())) + mlog.Debug("Error pinging plugin", mlog.Err(pingErr)) return fmt.Errorf("Plugin RPC connection is not responding") } } From 794ae335b3e78f98d451578fd502a58b872aa56a Mon Sep 17 00:00:00 2001 From: Elias Nahum Date: Thu, 12 Sep 2019 11:30:15 -0300 Subject: [PATCH 17/53] MM-17953 Set updateAt and remove from cache when a user is (de)activated (#12178) --- app/user.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/app/user.go b/app/user.go index 2920eaefa8..0b9f5922dd 100644 --- a/app/user.go +++ b/app/user.go @@ -978,10 +978,11 @@ func (a *App) invalidateUserChannelMembersCaches(user *model.User) *model.AppErr } func (a *App) UpdateActive(user *model.User, active bool) (*model.User, *model.AppError) { + user.UpdateAt = model.GetMillis() if active { user.DeleteAt = 0 } else { - user.DeleteAt = model.GetMillis() + 
user.DeleteAt = user.UpdateAt } userUpdate, err := a.Srv.Store.User().Update(user, true) @@ -997,6 +998,7 @@ func (a *App) UpdateActive(user *model.User, active bool) (*model.User, *model.A } a.invalidateUserChannelMembersCaches(user) + a.InvalidateCacheForUser(user.Id) a.sendUpdatedUserEvent(*ruser) From b7a879b9850beaf3908c15833f36d07df401152a Mon Sep 17 00:00:00 2001 From: Nikhil Ranjan Date: Thu, 12 Sep 2019 17:27:04 +0200 Subject: [PATCH 18/53] Converting to structured logging the file app/post.go (#12138) --- app/post.go | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/app/post.go b/app/post.go index dc6f75103e..b54de9e1cc 100644 --- a/app/post.go +++ b/app/post.go @@ -75,7 +75,12 @@ func (a *App) CreatePostAsUser(post *model.Post, currentSessionId string) (*mode // Update the LastViewAt only if the post does not have from_webhook prop set (eg. Zapier app) if _, ok := post.Props["from_webhook"]; !ok { if _, err := a.MarkChannelsAsViewed([]string{post.ChannelId}, post.UserId, currentSessionId); err != nil { - mlog.Error(fmt.Sprintf("Encountered error updating last viewed, channel_id=%s, user_id=%s, err=%v", post.ChannelId, post.UserId, err)) + mlog.Error( + "Encountered error updating last viewed", + mlog.String("channel_id", post.ChannelId), + mlog.String("user_id", post.UserId), + mlog.Err(err), + ) } } @@ -228,7 +233,7 @@ func (a *App) CreatePost(post *model.Post, channel *model.Channel, triggerWebhoo post.Props["attachments"] = attachmentsInterface } if err != nil { - mlog.Error("Could not convert post attachments to map interface, err=%s" + err.Error()) + mlog.Error("Could not convert post attachments to map interface.", mlog.Err(err)) } } @@ -563,7 +568,7 @@ func (a *App) UpdatePost(post *model.Post, safeUpdate bool) (*model.Post, *model a.Srv.Go(func() { channel, chanErr := a.Srv.Store.Channel().GetForPost(rpost.Id) if chanErr != nil { - mlog.Error(fmt.Sprintf("Couldn't get channel %v for post %v for 
Elasticsearch indexing.", rpost.ChannelId, rpost.Id)) + mlog.Error("Couldn't get channel for post for Elasticsearch indexing.", mlog.String("channel_id", rpost.ChannelId), mlog.String("post_id", rpost.Id)) return } if err := a.Elasticsearch.IndexPost(rpost, channel.TeamId); err != nil { @@ -856,7 +861,7 @@ func (a *App) DeletePost(postId, deleteByID string) (*model.Post, *model.AppErro func (a *App) DeleteFlaggedPosts(postId string) { if err := a.Srv.Store.Preference().DeleteCategoryAndName(model.PREFERENCE_CATEGORY_FLAGGED_POST, postId); err != nil { - mlog.Warn(fmt.Sprintf("Unable to delete flagged post preference when deleting post, err=%v", err)) + mlog.Warn("Unable to delete flagged post preference when deleting post.", mlog.Err(err)) return } } @@ -867,7 +872,7 @@ func (a *App) DeletePostFiles(post *model.Post) { } if _, err := a.Srv.Store.FileInfo().DeleteForPost(post.Id); err != nil { - mlog.Warn(fmt.Sprintf("Encountered error when deleting files for post, post_id=%v, err=%v", post.Id, err), mlog.String("post_id", post.Id)) + mlog.Warn("Encountered error when deleting files for post", mlog.String("post_id", post.Id), mlog.Err(err)) } } @@ -949,7 +954,7 @@ func (a *App) convertChannelNamesToChannelIds(channels []string, userId string, for idx, channelName := range channels { channel, err := a.parseAndFetchChannelIdByNameFromInFilter(channelName, userId, teamId, includeDeletedChannels) if err != nil { - mlog.Error(fmt.Sprint(err)) + mlog.Error("error getting channel id by name from in filter", mlog.Err(err)) continue } channels[idx] = channel.Id @@ -960,7 +965,7 @@ func (a *App) convertChannelNamesToChannelIds(channels []string, userId string, func (a *App) convertUserNameToUserIds(usernames []string) []string { for idx, username := range usernames { if user, err := a.GetUserByUsername(username); err != nil { - mlog.Error(fmt.Sprint(err)) + mlog.Error("error getting user by username", mlog.String("user_name", username), mlog.Err(err)) } else { usernames[idx] 
= user.Id } @@ -1005,7 +1010,7 @@ func (a *App) esSearchPostsInTeamForUser(paramsList []*model.SearchParams, userI // We only allow the user to search in channels they are a member of. userChannels, err := a.GetChannelsForUser(teamId, userId, includeDeleted) if err != nil { - mlog.Error(fmt.Sprint(err)) + mlog.Error("error getting channel for user", mlog.Err(err)) return nil, err } @@ -1062,7 +1067,7 @@ func (a *App) SearchPostsInTeamForUser(terms string, userId string, teamId strin if strings.HasPrefix(channelName, "@") { channel, err := a.parseAndFetchChannelIdByNameFromInFilter(channelName, userId, teamId, includeDeletedChannels) if err != nil { - mlog.Error(fmt.Sprint(err)) + mlog.Error("error getting channel_id by name from in filter", mlog.Err(err)) continue } params.InChannels[idx] = channel.Name @@ -1072,7 +1077,7 @@ func (a *App) SearchPostsInTeamForUser(terms string, userId string, teamId strin if strings.HasPrefix(channelName, "@") { channel, err := a.parseAndFetchChannelIdByNameFromInFilter(channelName, userId, teamId, includeDeletedChannels) if err != nil { - mlog.Error(fmt.Sprint(err)) + mlog.Error("error getting channel_id by name from in filter", mlog.Err(err)) continue } params.ExcludedChannels[idx] = channel.Name From e8911b3e087c22389e5755237c7280f33fc580d0 Mon Sep 17 00:00:00 2001 From: Nikhil Ranjan Date: Thu, 12 Sep 2019 17:58:16 +0200 Subject: [PATCH 19/53] Converting to structured logging the file app/analytics.go (#12171) --- app/analytics.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/app/analytics.go b/app/analytics.go index 363fcafd00..eedaf42c0c 100644 --- a/app/analytics.go +++ b/app/analytics.go @@ -4,8 +4,6 @@ package app import ( - "fmt" - "github.com/mattermost/mattermost-server/mlog" "github.com/mattermost/mattermost-server/model" "github.com/mattermost/mattermost-server/store" @@ -25,7 +23,7 @@ func (a *App) GetAnalytics(name string, teamId string) (model.AnalyticsRows, *mo } if systemUserCount > 
int64(*a.Config().AnalyticsSettings.MaxUsersForStatistics) { - mlog.Debug(fmt.Sprintf("More than %v users on the system, intensive queries skipped", *a.Config().AnalyticsSettings.MaxUsersForStatistics)) + mlog.Debug("More than limit users are on the system, intensive queries skipped", mlog.Int("limit", *a.Config().AnalyticsSettings.MaxUsersForStatistics)) skipIntensiveQueries = true } From e58aeb90a88d7efe3e71ebe66fe651397a6c4cd1 Mon Sep 17 00:00:00 2001 From: Nikhil Ranjan Date: Thu, 12 Sep 2019 18:22:48 +0200 Subject: [PATCH 20/53] MM-18255 Converting to structured logging the file web/static.go (#12093) * Converting to structured logging the file web/static.go * change staticDir to clientDir in logs --- web/static.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/web/static.go b/web/static.go index 847f962d76..57c1126a4f 100644 --- a/web/static.go +++ b/web/static.go @@ -4,7 +4,6 @@ package web import ( - "fmt" "mime" "net/http" "path" @@ -28,7 +27,7 @@ func (w *Web) InitStatic() { } staticDir, _ := fileutils.FindDir(model.CLIENT_DIR) - mlog.Debug(fmt.Sprintf("Using client directory at %v", staticDir)) + mlog.Debug("Using client directory", mlog.String("clientDir", staticDir)) subpath, _ := utils.GetSubpathFromConfig(w.ConfigService.Config()) From 28cc7e7e364110bdbbb4cbee1076fd39671a58d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jes=C3=BAs=20Espino?= Date: Thu, 12 Sep 2019 18:52:45 +0200 Subject: [PATCH 21/53] Migrating roles and schemes to new Cache Layer (#11936) * Migrating roles and schemes to new Cache Layer * Adding missed license headers * Updating cache tests * Adding the cache layer to the testlib helper * Fixing cyclic dependency * fixing a bit of not-idiomatic error handling * Another small fix arrount idiomatic error handling --- store/layered_store.go | 70 +---------- store/layered_store_supplier.go | 20 --- store/local_cache_supplier.go | 30 +---- store/local_cache_supplier_roles.go | 96 --------------- 
store/local_cache_supplier_schemes.go | 64 ---------- store/localcachelayer/layer.go | 24 ++++ store/localcachelayer/main_test.go | 33 +++++ store/localcachelayer/reaction_layer_test.go | 56 +++------ store/localcachelayer/role_layer.go | 80 ++++++++++++ store/localcachelayer/role_layer_test.go | 69 +++++++++++ store/localcachelayer/scheme_layer.go | 58 +++++++++ store/localcachelayer/scheme_layer_test.go | 69 +++++++++++ store/redis_supplier_roles.go | 123 ------------------- store/redis_supplier_schemes.go | 40 ------ store/sqlstore/role_supplier.go | 31 +++-- store/sqlstore/scheme_supplier.go | 44 ++++--- store/sqlstore/supplier.go | 5 +- testlib/cluster.go | 8 +- 18 files changed, 410 insertions(+), 510 deletions(-) delete mode 100644 store/local_cache_supplier_roles.go delete mode 100644 store/local_cache_supplier_schemes.go create mode 100644 store/localcachelayer/role_layer.go create mode 100644 store/localcachelayer/role_layer_test.go create mode 100644 store/localcachelayer/scheme_layer.go create mode 100644 store/localcachelayer/scheme_layer_test.go delete mode 100644 store/redis_supplier_roles.go delete mode 100644 store/redis_supplier_schemes.go diff --git a/store/layered_store.go b/store/layered_store.go index 9d9f3c9a47..6316b2b3e2 100644 --- a/store/layered_store.go +++ b/store/layered_store.go @@ -8,7 +8,6 @@ import ( "github.com/mattermost/mattermost-server/einterfaces" "github.com/mattermost/mattermost-server/mlog" - "github.com/mattermost/mattermost-server/model" ) const ( @@ -22,8 +21,6 @@ type LayeredStoreDatabaseLayer interface { type LayeredStore struct { TmpContext context.Context - RoleStore RoleStore - SchemeStore SchemeStore DatabaseLayer LayeredStoreDatabaseLayer LocalCacheLayer *LocalCacheSupplier RedisLayer *RedisSupplier @@ -37,9 +34,6 @@ func NewLayeredStore(db LayeredStoreDatabaseLayer, metrics einterfaces.MetricsIn LocalCacheLayer: NewLocalCacheSupplier(metrics, cluster), } - store.RoleStore = &LayeredRoleStore{store} - 
store.SchemeStore = &LayeredSchemeStore{store} - // Setup the chain if ENABLE_EXPERIMENTAL_REDIS { mlog.Debug("Experimental redis enabled.") @@ -161,7 +155,7 @@ func (s *LayeredStore) Plugin() PluginStore { } func (s *LayeredStore) Role() RoleStore { - return s.RoleStore + return s.DatabaseLayer.Role() } func (s *LayeredStore) TermsOfService() TermsOfServiceStore { @@ -173,7 +167,7 @@ func (s *LayeredStore) UserTermsOfService() UserTermsOfServiceStore { } func (s *LayeredStore) Scheme() SchemeStore { - return s.SchemeStore + return s.DatabaseLayer.Scheme() } func (s *LayeredStore) Group() GroupStore { @@ -220,63 +214,3 @@ func (s *LayeredStore) TotalSearchDbConnections() int { func (s *LayeredStore) CheckIntegrity() <-chan IntegrityCheckResult { return s.DatabaseLayer.CheckIntegrity() } - -type LayeredRoleStore struct { - *LayeredStore -} - -func (s *LayeredRoleStore) Save(role *model.Role) (*model.Role, *model.AppError) { - return s.LayerChainHead.RoleSave(s.TmpContext, role) -} - -func (s *LayeredRoleStore) Get(roleId string) (*model.Role, *model.AppError) { - return s.LayerChainHead.RoleGet(s.TmpContext, roleId) -} - -func (s *LayeredRoleStore) GetAll() ([]*model.Role, *model.AppError) { - return s.LayerChainHead.RoleGetAll(s.TmpContext) -} - -func (s *LayeredRoleStore) GetByName(name string) (*model.Role, *model.AppError) { - return s.LayerChainHead.RoleGetByName(s.TmpContext, name) -} - -func (s *LayeredRoleStore) GetByNames(names []string) ([]*model.Role, *model.AppError) { - return s.LayerChainHead.RoleGetByNames(s.TmpContext, names) -} - -func (s *LayeredRoleStore) Delete(roldId string) (*model.Role, *model.AppError) { - return s.LayerChainHead.RoleDelete(s.TmpContext, roldId) -} - -func (s *LayeredRoleStore) PermanentDeleteAll() *model.AppError { - return s.LayerChainHead.RolePermanentDeleteAll(s.TmpContext) -} - -type LayeredSchemeStore struct { - *LayeredStore -} - -func (s *LayeredSchemeStore) Save(scheme *model.Scheme) (*model.Scheme, *model.AppError) 
{ - return s.LayerChainHead.SchemeSave(s.TmpContext, scheme) -} - -func (s *LayeredSchemeStore) Get(schemeId string) (*model.Scheme, *model.AppError) { - return s.LayerChainHead.SchemeGet(s.TmpContext, schemeId) -} - -func (s *LayeredSchemeStore) GetByName(schemeName string) (*model.Scheme, *model.AppError) { - return s.LayerChainHead.SchemeGetByName(s.TmpContext, schemeName) -} - -func (s *LayeredSchemeStore) Delete(schemeId string) (*model.Scheme, *model.AppError) { - return s.LayerChainHead.SchemeDelete(s.TmpContext, schemeId) -} - -func (s *LayeredSchemeStore) GetAllPage(scope string, offset int, limit int) ([]*model.Scheme, *model.AppError) { - return s.LayerChainHead.SchemeGetAllPage(s.TmpContext, scope, offset, limit) -} - -func (s *LayeredSchemeStore) PermanentDeleteAll() *model.AppError { - return s.LayerChainHead.SchemePermanentDeleteAll(s.TmpContext) -} diff --git a/store/layered_store_supplier.go b/store/layered_store_supplier.go index 1bba88ca0e..0bccad868d 100644 --- a/store/layered_store_supplier.go +++ b/store/layered_store_supplier.go @@ -3,9 +3,6 @@ package store -import "github.com/mattermost/mattermost-server/model" -import "context" - type LayeredStoreSupplierResult struct { StoreResult } @@ -20,21 +17,4 @@ type LayeredStoreSupplier interface { // SetChainNext(LayeredStoreSupplier) Next() LayeredStoreSupplier - - // Roles - RoleSave(ctx context.Context, role *model.Role, hints ...LayeredStoreHint) (*model.Role, *model.AppError) - RoleGet(ctx context.Context, roleId string, hints ...LayeredStoreHint) (*model.Role, *model.AppError) - RoleGetAll(ctx context.Context, hints ...LayeredStoreHint) ([]*model.Role, *model.AppError) - RoleGetByName(ctx context.Context, name string, hints ...LayeredStoreHint) (*model.Role, *model.AppError) - RoleGetByNames(ctx context.Context, names []string, hints ...LayeredStoreHint) ([]*model.Role, *model.AppError) - RoleDelete(ctx context.Context, roldId string, hints ...LayeredStoreHint) (*model.Role, *model.AppError) 
- RolePermanentDeleteAll(ctx context.Context, hints ...LayeredStoreHint) *model.AppError - - // Schemes - SchemeSave(ctx context.Context, scheme *model.Scheme, hints ...LayeredStoreHint) (*model.Scheme, *model.AppError) - SchemeGet(ctx context.Context, schemeId string, hints ...LayeredStoreHint) (*model.Scheme, *model.AppError) - SchemeGetByName(ctx context.Context, schemeName string, hints ...LayeredStoreHint) (*model.Scheme, *model.AppError) - SchemeDelete(ctx context.Context, schemeId string, hints ...LayeredStoreHint) (*model.Scheme, *model.AppError) - SchemeGetAllPage(ctx context.Context, scope string, offset int, limit int, hints ...LayeredStoreHint) ([]*model.Scheme, *model.AppError) - SchemePermanentDeleteAll(ctx context.Context, hints ...LayeredStoreHint) *model.AppError } diff --git a/store/local_cache_supplier.go b/store/local_cache_supplier.go index 4f5667e5a8..6dc971c226 100644 --- a/store/local_cache_supplier.go +++ b/store/local_cache_supplier.go @@ -8,28 +8,16 @@ import ( "github.com/mattermost/mattermost-server/einterfaces" "github.com/mattermost/mattermost-server/model" - "github.com/mattermost/mattermost-server/utils" ) const ( - ROLE_CACHE_SIZE = 20000 - ROLE_CACHE_SEC = 30 * 60 - - SCHEME_CACHE_SIZE = 20000 - SCHEME_CACHE_SEC = 30 * 60 - - GROUP_CACHE_SIZE = 20000 - GROUP_CACHE_SEC = 30 * 60 - CLEAR_CACHE_MESSAGE_DATA = "" ) type LocalCacheSupplier struct { - next LayeredStoreSupplier - roleCache *utils.Cache - schemeCache *utils.Cache - metrics einterfaces.MetricsInterface - cluster einterfaces.ClusterInterface + next LayeredStoreSupplier + metrics einterfaces.MetricsInterface + cluster einterfaces.ClusterInterface } // Caching Interface @@ -46,14 +34,8 @@ type ObjectCache interface { func NewLocalCacheSupplier(metrics einterfaces.MetricsInterface, cluster einterfaces.ClusterInterface) *LocalCacheSupplier { supplier := &LocalCacheSupplier{ - roleCache: utils.NewLruWithParams(ROLE_CACHE_SIZE, "Role", ROLE_CACHE_SEC, 
model.CLUSTER_EVENT_INVALIDATE_CACHE_FOR_ROLES), - schemeCache: utils.NewLruWithParams(SCHEME_CACHE_SIZE, "Scheme", SCHEME_CACHE_SEC, model.CLUSTER_EVENT_INVALIDATE_CACHE_FOR_SCHEMES), - metrics: metrics, - cluster: cluster, - } - - if cluster != nil { - cluster.RegisterClusterMessageHandler(model.CLUSTER_EVENT_INVALIDATE_CACHE_FOR_ROLES, supplier.handleClusterInvalidateRole) + metrics: metrics, + cluster: cluster, } return supplier @@ -122,6 +104,4 @@ func (s *LocalCacheSupplier) doClearCacheCluster(cache ObjectCache) { } func (s *LocalCacheSupplier) Invalidate() { - s.doClearCacheCluster(s.roleCache) - s.doClearCacheCluster(s.schemeCache) } diff --git a/store/local_cache_supplier_roles.go b/store/local_cache_supplier_roles.go deleted file mode 100644 index 973ea6d1da..0000000000 --- a/store/local_cache_supplier_roles.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. - -package store - -import ( - "context" - - "github.com/mattermost/mattermost-server/model" -) - -func (s *LocalCacheSupplier) handleClusterInvalidateRole(msg *model.ClusterMessage) { - if msg.Data == CLEAR_CACHE_MESSAGE_DATA { - s.roleCache.Purge() - } else { - s.roleCache.Remove(msg.Data) - } -} - -func (s *LocalCacheSupplier) RoleSave(ctx context.Context, role *model.Role, hints ...LayeredStoreHint) (*model.Role, *model.AppError) { - if len(role.Name) != 0 { - defer s.doInvalidateCacheCluster(s.roleCache, role.Name) - } - return s.Next().RoleSave(ctx, role, hints...) -} - -func (s *LocalCacheSupplier) RoleGet(ctx context.Context, roleId string, hints ...LayeredStoreHint) (*model.Role, *model.AppError) { - // Roles are cached by name, as that is most commonly how they are looked up. - // This means that no caching is supported on roles being looked up by ID. - return s.Next().RoleGet(ctx, roleId, hints...) 
-} - -func (s *LocalCacheSupplier) RoleGetAll(ctx context.Context, hints ...LayeredStoreHint) ([]*model.Role, *model.AppError) { - // Roles are cached by name, as that is most commonly how they are looked up. - // This means that no caching is supported on roles being listed. - return s.Next().RoleGetAll(ctx, hints...) -} - -func (s *LocalCacheSupplier) RoleGetByName(ctx context.Context, name string, hints ...LayeredStoreHint) (*model.Role, *model.AppError) { - if result := s.doStandardReadCache(ctx, s.roleCache, name, hints...); result != nil { - return result.Data.(*model.Role), nil - } - - role, err := s.Next().RoleGetByName(ctx, name, hints...) - if err != nil { - return nil, err - } - - result := NewSupplierResult() - result.Data = role - s.doStandardAddToCache(ctx, s.roleCache, name, result, hints...) - - return role, nil -} - -func (s *LocalCacheSupplier) RoleGetByNames(ctx context.Context, roleNames []string, hints ...LayeredStoreHint) ([]*model.Role, *model.AppError) { - var foundRoles []*model.Role - var rolesToQuery []string - - for _, roleName := range roleNames { - if result := s.doStandardReadCache(ctx, s.roleCache, roleName, hints...); result != nil { - foundRoles = append(foundRoles, result.Data.(*model.Role)) - } else { - rolesToQuery = append(rolesToQuery, roleName) - } - } - - rolesFound, err := s.Next().RoleGetByNames(ctx, rolesToQuery, hints...) - - for _, role := range rolesFound { - res := NewSupplierResult() - res.Data = role - s.doStandardAddToCache(ctx, s.roleCache, role.Name, res, hints...) - } - foundRoles = append(foundRoles, rolesFound...) - - return foundRoles, err -} - -func (s *LocalCacheSupplier) RoleDelete(ctx context.Context, roleId string, hints ...LayeredStoreHint) (*model.Role, *model.AppError) { - role, err := s.Next().RoleDelete(ctx, roleId, hints...) 
- if err != nil { - return nil, err - } - - s.doInvalidateCacheCluster(s.roleCache, role.Name) - - return role, nil -} - -func (s *LocalCacheSupplier) RolePermanentDeleteAll(ctx context.Context, hints ...LayeredStoreHint) *model.AppError { - defer s.roleCache.Purge() - defer s.doClearCacheCluster(s.roleCache) - - return s.Next().RolePermanentDeleteAll(ctx, hints...) -} diff --git a/store/local_cache_supplier_schemes.go b/store/local_cache_supplier_schemes.go deleted file mode 100644 index b7a9a12607..0000000000 --- a/store/local_cache_supplier_schemes.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) 2018-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. - -package store - -import ( - "context" - - "github.com/mattermost/mattermost-server/model" -) - -func (s *LocalCacheSupplier) handleClusterInvalidateScheme(msg *model.ClusterMessage) { - if msg.Data == CLEAR_CACHE_MESSAGE_DATA { - s.schemeCache.Purge() - } else { - s.schemeCache.Remove(msg.Data) - } -} - -func (s *LocalCacheSupplier) SchemeSave(ctx context.Context, scheme *model.Scheme, hints ...LayeredStoreHint) (*model.Scheme, *model.AppError) { - if len(scheme.Id) != 0 { - defer s.doInvalidateCacheCluster(s.schemeCache, scheme.Id) - } - return s.Next().SchemeSave(ctx, scheme, hints...) -} - -func (s *LocalCacheSupplier) SchemeGet(ctx context.Context, schemeId string, hints ...LayeredStoreHint) (*model.Scheme, *model.AppError) { - if result := s.doStandardReadCache(ctx, s.schemeCache, schemeId, hints...); result != nil { - return result.Data.(*model.Scheme), nil - } - - scheme, err := s.Next().SchemeGet(ctx, schemeId, hints...) - if err != nil { - return nil, err - } - - result := NewSupplierResult() - result.Data = scheme - s.doStandardAddToCache(ctx, s.schemeCache, schemeId, result, hints...) 
- - return scheme, nil -} - -func (s *LocalCacheSupplier) SchemeGetByName(ctx context.Context, schemeName string, hints ...LayeredStoreHint) (*model.Scheme, *model.AppError) { - return s.Next().SchemeGetByName(ctx, schemeName, hints...) -} - -func (s *LocalCacheSupplier) SchemeDelete(ctx context.Context, schemeId string, hints ...LayeredStoreHint) (*model.Scheme, *model.AppError) { - defer s.doInvalidateCacheCluster(s.schemeCache, schemeId) - defer s.doClearCacheCluster(s.roleCache) - - return s.Next().SchemeDelete(ctx, schemeId, hints...) -} - -func (s *LocalCacheSupplier) SchemeGetAllPage(ctx context.Context, scope string, offset int, limit int, hints ...LayeredStoreHint) ([]*model.Scheme, *model.AppError) { - return s.Next().SchemeGetAllPage(ctx, scope, offset, limit, hints...) -} - -func (s *LocalCacheSupplier) SchemePermanentDeleteAll(ctx context.Context, hints ...LayeredStoreHint) *model.AppError { - defer s.doClearCacheCluster(s.schemeCache) - defer s.doClearCacheCluster(s.roleCache) - - return s.Next().SchemePermanentDeleteAll(ctx, hints...) 
-} diff --git a/store/localcachelayer/layer.go b/store/localcachelayer/layer.go index 9f369e1cd6..b799d5d163 100644 --- a/store/localcachelayer/layer.go +++ b/store/localcachelayer/layer.go @@ -14,6 +14,12 @@ const ( REACTION_CACHE_SIZE = 20000 REACTION_CACHE_SEC = 30 * 60 + ROLE_CACHE_SIZE = 20000 + ROLE_CACHE_SEC = 30 * 60 + + SCHEME_CACHE_SIZE = 20000 + SCHEME_CACHE_SEC = 30 * 60 + CLEAR_CACHE_MESSAGE_DATA = "" ) @@ -23,6 +29,10 @@ type LocalCacheStore struct { cluster einterfaces.ClusterInterface reaction LocalCacheReactionStore reactionCache *utils.Cache + role LocalCacheRoleStore + roleCache *utils.Cache + scheme LocalCacheSchemeStore + schemeCache *utils.Cache } func NewLocalCacheLayer(baseStore store.Store, metrics einterfaces.MetricsInterface, cluster einterfaces.ClusterInterface) LocalCacheStore { @@ -33,9 +43,15 @@ func NewLocalCacheLayer(baseStore store.Store, metrics einterfaces.MetricsInterf } localCacheStore.reactionCache = utils.NewLruWithParams(REACTION_CACHE_SIZE, "Reaction", REACTION_CACHE_SEC, model.CLUSTER_EVENT_INVALIDATE_CACHE_FOR_REACTIONS) localCacheStore.reaction = LocalCacheReactionStore{ReactionStore: baseStore.Reaction(), rootStore: &localCacheStore} + localCacheStore.roleCache = utils.NewLruWithParams(ROLE_CACHE_SIZE, "Role", ROLE_CACHE_SEC, model.CLUSTER_EVENT_INVALIDATE_CACHE_FOR_ROLES) + localCacheStore.role = LocalCacheRoleStore{RoleStore: baseStore.Role(), rootStore: &localCacheStore} + localCacheStore.schemeCache = utils.NewLruWithParams(SCHEME_CACHE_SIZE, "Scheme", SCHEME_CACHE_SEC, model.CLUSTER_EVENT_INVALIDATE_CACHE_FOR_SCHEMES) + localCacheStore.scheme = LocalCacheSchemeStore{SchemeStore: baseStore.Scheme(), rootStore: &localCacheStore} if cluster != nil { cluster.RegisterClusterMessageHandler(model.CLUSTER_EVENT_INVALIDATE_CACHE_FOR_REACTIONS, localCacheStore.reaction.handleClusterInvalidateReaction) + cluster.RegisterClusterMessageHandler(model.CLUSTER_EVENT_INVALIDATE_CACHE_FOR_ROLES, 
localCacheStore.role.handleClusterInvalidateRole) + cluster.RegisterClusterMessageHandler(model.CLUSTER_EVENT_INVALIDATE_CACHE_FOR_SCHEMES, localCacheStore.scheme.handleClusterInvalidateScheme) } return localCacheStore } @@ -44,6 +60,14 @@ func (s LocalCacheStore) Reaction() store.ReactionStore { return s.reaction } +func (s LocalCacheStore) Role() store.RoleStore { + return s.role +} + +func (s LocalCacheStore) Scheme() store.SchemeStore { + return s.scheme +} + func (s LocalCacheStore) DropAllTables() { s.Invalidate() s.Store.DropAllTables() diff --git a/store/localcachelayer/main_test.go b/store/localcachelayer/main_test.go index cb0466c656..6022826e81 100644 --- a/store/localcachelayer/main_test.go +++ b/store/localcachelayer/main_test.go @@ -6,11 +6,44 @@ package localcachelayer import ( "testing" + "github.com/mattermost/mattermost-server/model" + "github.com/mattermost/mattermost-server/store/storetest/mocks" "github.com/mattermost/mattermost-server/testlib" ) var mainHelper *testlib.MainHelper +func getMockStore() *mocks.Store { + mockStore := mocks.Store{} + + fakeReaction := model.Reaction{PostId: "123"} + mockReactionsStore := mocks.ReactionStore{} + mockReactionsStore.On("Save", &fakeReaction).Return(&model.Reaction{}, nil) + mockReactionsStore.On("Delete", &fakeReaction).Return(&model.Reaction{}, nil) + mockReactionsStore.On("GetForPost", "123", false).Return([]*model.Reaction{&fakeReaction}, nil) + mockReactionsStore.On("GetForPost", "123", true).Return([]*model.Reaction{&fakeReaction}, nil) + mockStore.On("Reaction").Return(&mockReactionsStore) + + fakeRole := model.Role{Id: "123", Name: "role-name"} + mockRolesStore := mocks.RoleStore{} + mockRolesStore.On("Save", &fakeRole).Return(&model.Role{}, nil) + mockRolesStore.On("Delete", "123").Return(&fakeRole, nil) + mockRolesStore.On("GetByName", "role-name").Return(&fakeRole, nil) + mockRolesStore.On("GetByNames", []string{"role-name"}).Return([]*model.Role{&fakeRole}, nil) + 
mockRolesStore.On("PermanentDeleteAll").Return(nil) + mockStore.On("Role").Return(&mockRolesStore) + + fakeScheme := model.Scheme{Id: "123", Name: "scheme-name"} + mockSchemesStore := mocks.SchemeStore{} + mockSchemesStore.On("Save", &fakeScheme).Return(&model.Scheme{}, nil) + mockSchemesStore.On("Delete", "123").Return(&model.Scheme{}, nil) + mockSchemesStore.On("Get", "123").Return(&fakeScheme, nil) + mockSchemesStore.On("PermanentDeleteAll").Return(nil) + mockStore.On("Scheme").Return(&mockSchemesStore) + + return &mockStore +} + func TestMain(m *testing.M) { mainHelper = testlib.NewMainHelperWithOptions(nil) defer mainHelper.Close() diff --git a/store/localcachelayer/reaction_layer_test.go b/store/localcachelayer/reaction_layer_test.go index b449b705dd..4f1437c717 100644 --- a/store/localcachelayer/reaction_layer_test.go +++ b/store/localcachelayer/reaction_layer_test.go @@ -21,72 +21,48 @@ func TestReactionStoreCache(t *testing.T) { fakeReaction := model.Reaction{PostId: "123"} t.Run("first call not cached, second cached and returning same data", func(t *testing.T) { - mockStore := mocks.Store{} - mockReactionsStore := mocks.ReactionStore{} - mockReactionsStore.On("Save", &fakeReaction).Return(&model.Reaction{}, nil) - mockReactionsStore.On("Delete", &fakeReaction).Return(&model.Reaction{}, nil) - mockReactionsStore.On("GetForPost", "123", false).Return([]*model.Reaction{&fakeReaction}, nil) - mockReactionsStore.On("GetForPost", "123", true).Return([]*model.Reaction{&fakeReaction}, nil) - mockStore.On("Reaction").Return(&mockReactionsStore) - cachedStore := NewLocalCacheLayer(&mockStore, nil, nil) + mockStore := getMockStore() + cachedStore := NewLocalCacheLayer(mockStore, nil, nil) reaction, err := cachedStore.Reaction().GetForPost("123", true) require.Nil(t, err) assert.Equal(t, reaction, []*model.Reaction{&fakeReaction}) - mockReactionsStore.AssertNumberOfCalls(t, "GetForPost", 1) + mockStore.Reaction().(*mocks.ReactionStore).AssertNumberOfCalls(t, 
"GetForPost", 1) require.Nil(t, err) assert.Equal(t, reaction, []*model.Reaction{&fakeReaction}) cachedStore.Reaction().GetForPost("123", true) - mockReactionsStore.AssertNumberOfCalls(t, "GetForPost", 1) + mockStore.Reaction().(*mocks.ReactionStore).AssertNumberOfCalls(t, "GetForPost", 1) }) t.Run("first call not cached, second force no cached", func(t *testing.T) { - mockStore := mocks.Store{} - mockReactionsStore := mocks.ReactionStore{} - mockReactionsStore.On("Save", &fakeReaction).Return(&model.Reaction{}, nil) - mockReactionsStore.On("Delete", &fakeReaction).Return(&model.Reaction{}, nil) - mockReactionsStore.On("GetForPost", "123", false).Return([]*model.Reaction{&fakeReaction}, nil) - mockReactionsStore.On("GetForPost", "123", true).Return([]*model.Reaction{&fakeReaction}, nil) - mockStore.On("Reaction").Return(&mockReactionsStore) - cachedStore := NewLocalCacheLayer(&mockStore, nil, nil) + mockStore := getMockStore() + cachedStore := NewLocalCacheLayer(mockStore, nil, nil) cachedStore.Reaction().GetForPost("123", true) - mockReactionsStore.AssertNumberOfCalls(t, "GetForPost", 1) + mockStore.Reaction().(*mocks.ReactionStore).AssertNumberOfCalls(t, "GetForPost", 1) cachedStore.Reaction().GetForPost("123", false) - mockReactionsStore.AssertNumberOfCalls(t, "GetForPost", 2) + mockStore.Reaction().(*mocks.ReactionStore).AssertNumberOfCalls(t, "GetForPost", 2) }) t.Run("first call not cached, save, and then not cached again", func(t *testing.T) { - mockStore := mocks.Store{} - mockReactionsStore := mocks.ReactionStore{} - mockReactionsStore.On("Save", &fakeReaction).Return(&model.Reaction{}, nil) - mockReactionsStore.On("Delete", &fakeReaction).Return(&model.Reaction{}, nil) - mockReactionsStore.On("GetForPost", "123", false).Return([]*model.Reaction{&fakeReaction}, nil) - mockReactionsStore.On("GetForPost", "123", true).Return([]*model.Reaction{&fakeReaction}, nil) - mockStore.On("Reaction").Return(&mockReactionsStore) - cachedStore := 
NewLocalCacheLayer(&mockStore, nil, nil) + mockStore := getMockStore() + cachedStore := NewLocalCacheLayer(mockStore, nil, nil) cachedStore.Reaction().GetForPost("123", true) - mockReactionsStore.AssertNumberOfCalls(t, "GetForPost", 1) + mockStore.Reaction().(*mocks.ReactionStore).AssertNumberOfCalls(t, "GetForPost", 1) cachedStore.Reaction().Save(&fakeReaction) cachedStore.Reaction().GetForPost("123", true) - mockReactionsStore.AssertNumberOfCalls(t, "GetForPost", 2) + mockStore.Reaction().(*mocks.ReactionStore).AssertNumberOfCalls(t, "GetForPost", 2) }) t.Run("first call not cached, delete, and then not cached again", func(t *testing.T) { - mockStore := mocks.Store{} - mockReactionsStore := mocks.ReactionStore{} - mockReactionsStore.On("Save", &fakeReaction).Return(&model.Reaction{}, nil) - mockReactionsStore.On("Delete", &fakeReaction).Return(&model.Reaction{}, nil) - mockReactionsStore.On("GetForPost", "123", false).Return([]*model.Reaction{&fakeReaction}, nil) - mockReactionsStore.On("GetForPost", "123", true).Return([]*model.Reaction{&fakeReaction}, nil) - mockStore.On("Reaction").Return(&mockReactionsStore) - cachedStore := NewLocalCacheLayer(&mockStore, nil, nil) + mockStore := getMockStore() + cachedStore := NewLocalCacheLayer(mockStore, nil, nil) cachedStore.Reaction().GetForPost("123", true) - mockReactionsStore.AssertNumberOfCalls(t, "GetForPost", 1) + mockStore.Reaction().(*mocks.ReactionStore).AssertNumberOfCalls(t, "GetForPost", 1) cachedStore.Reaction().Delete(&fakeReaction) cachedStore.Reaction().GetForPost("123", true) - mockReactionsStore.AssertNumberOfCalls(t, "GetForPost", 2) + mockStore.Reaction().(*mocks.ReactionStore).AssertNumberOfCalls(t, "GetForPost", 2) }) } diff --git a/store/localcachelayer/role_layer.go b/store/localcachelayer/role_layer.go new file mode 100644 index 0000000000..0cd03c1427 --- /dev/null +++ b/store/localcachelayer/role_layer.go @@ -0,0 +1,80 @@ +// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. 
+// See License.txt for license information. + +package localcachelayer + +import ( + "github.com/mattermost/mattermost-server/model" + "github.com/mattermost/mattermost-server/store" +) + +type LocalCacheRoleStore struct { + store.RoleStore + rootStore *LocalCacheStore +} + +func (s *LocalCacheRoleStore) handleClusterInvalidateRole(msg *model.ClusterMessage) { + if msg.Data == CLEAR_CACHE_MESSAGE_DATA { + s.rootStore.roleCache.Purge() + } else { + s.rootStore.roleCache.Remove(msg.Data) + } +} + +func (s LocalCacheRoleStore) Save(role *model.Role) (*model.Role, *model.AppError) { + if len(role.Name) != 0 { + defer s.rootStore.doInvalidateCacheCluster(s.rootStore.roleCache, role.Name) + } + return s.RoleStore.Save(role) +} + +func (s LocalCacheRoleStore) GetByName(name string) (*model.Role, *model.AppError) { + if role := s.rootStore.doStandardReadCache(s.rootStore.roleCache, name); role != nil { + return role.(*model.Role), nil + } + + role, err := s.RoleStore.GetByName(name) + if err != nil { + return nil, err + } + s.rootStore.doStandardAddToCache(s.rootStore.roleCache, name, role) + return role, nil +} + +func (s LocalCacheRoleStore) GetByNames(names []string) ([]*model.Role, *model.AppError) { + var foundRoles []*model.Role + var rolesToQuery []string + + for _, roleName := range names { + if role := s.rootStore.doStandardReadCache(s.rootStore.roleCache, roleName); role != nil { + foundRoles = append(foundRoles, role.(*model.Role)) + } else { + rolesToQuery = append(rolesToQuery, roleName) + } + } + + roles, _ := s.RoleStore.GetByNames(rolesToQuery) + + if roles != nil { + for _, role := range roles { + s.rootStore.doStandardAddToCache(s.rootStore.roleCache, role.Name, role) + } + } + return append(foundRoles, roles...), nil +} + +func (s LocalCacheRoleStore) Delete(roleId string) (*model.Role, *model.AppError) { + role, err := s.RoleStore.Delete(roleId) + + if err == nil { + s.rootStore.doInvalidateCacheCluster(s.rootStore.roleCache, role.Name) + } + return 
role, err +} + +func (s LocalCacheRoleStore) PermanentDeleteAll() *model.AppError { + defer s.rootStore.roleCache.Purge() + defer s.rootStore.doClearCacheCluster(s.rootStore.roleCache) + + return s.RoleStore.PermanentDeleteAll() +} diff --git a/store/localcachelayer/role_layer_test.go b/store/localcachelayer/role_layer_test.go new file mode 100644 index 0000000000..c2e69a9223 --- /dev/null +++ b/store/localcachelayer/role_layer_test.go @@ -0,0 +1,69 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package localcachelayer + +import ( + "testing" + + "github.com/mattermost/mattermost-server/model" + "github.com/mattermost/mattermost-server/store/storetest" + "github.com/mattermost/mattermost-server/store/storetest/mocks" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestRoleStore(t *testing.T) { + StoreTest(t, storetest.TestRoleStore) +} + +func TestRoleStoreCache(t *testing.T) { + fakeRole := model.Role{Id: "123", Name: "role-name"} + + t.Run("first call not cached, second cached and returning same data", func(t *testing.T) { + mockStore := getMockStore() + cachedStore := NewLocalCacheLayer(mockStore, nil, nil) + + role, err := cachedStore.Role().GetByName("role-name") + require.Nil(t, err) + assert.Equal(t, role, &fakeRole) + mockStore.Role().(*mocks.RoleStore).AssertNumberOfCalls(t, "GetByName", 1) + require.Nil(t, err) + assert.Equal(t, role, &fakeRole) + cachedStore.Role().GetByName("role-name") + mockStore.Role().(*mocks.RoleStore).AssertNumberOfCalls(t, "GetByName", 1) + }) + + t.Run("first call not cached, save, and then not cached again", func(t *testing.T) { + mockStore := getMockStore() + cachedStore := NewLocalCacheLayer(mockStore, nil, nil) + + cachedStore.Role().GetByName("role-name") + mockStore.Role().(*mocks.RoleStore).AssertNumberOfCalls(t, "GetByName", 1) + cachedStore.Role().Save(&fakeRole) + cachedStore.Role().GetByName("role-name") 
+ mockStore.Role().(*mocks.RoleStore).AssertNumberOfCalls(t, "GetByName", 2) + }) + + t.Run("first call not cached, delete, and then not cached again", func(t *testing.T) { + mockStore := getMockStore() + cachedStore := NewLocalCacheLayer(mockStore, nil, nil) + + cachedStore.Role().GetByName("role-name") + mockStore.Role().(*mocks.RoleStore).AssertNumberOfCalls(t, "GetByName", 1) + cachedStore.Role().Delete("123") + cachedStore.Role().GetByName("role-name") + mockStore.Role().(*mocks.RoleStore).AssertNumberOfCalls(t, "GetByName", 2) + }) + + t.Run("first call not cached, permanent delete all, and then not cached again", func(t *testing.T) { + mockStore := getMockStore() + cachedStore := NewLocalCacheLayer(mockStore, nil, nil) + + cachedStore.Role().GetByName("role-name") + mockStore.Role().(*mocks.RoleStore).AssertNumberOfCalls(t, "GetByName", 1) + cachedStore.Role().PermanentDeleteAll() + cachedStore.Role().GetByName("role-name") + mockStore.Role().(*mocks.RoleStore).AssertNumberOfCalls(t, "GetByName", 2) + }) +} diff --git a/store/localcachelayer/scheme_layer.go b/store/localcachelayer/scheme_layer.go new file mode 100644 index 0000000000..cf274e8ad5 --- /dev/null +++ b/store/localcachelayer/scheme_layer.go @@ -0,0 +1,58 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
+ +package localcachelayer + +import ( + "github.com/mattermost/mattermost-server/model" + "github.com/mattermost/mattermost-server/store" +) + +type LocalCacheSchemeStore struct { + store.SchemeStore + rootStore *LocalCacheStore +} + +func (s *LocalCacheSchemeStore) handleClusterInvalidateScheme(msg *model.ClusterMessage) { + if msg.Data == CLEAR_CACHE_MESSAGE_DATA { + s.rootStore.schemeCache.Purge() + } else { + s.rootStore.schemeCache.Remove(msg.Data) + } +} + +func (s LocalCacheSchemeStore) Save(scheme *model.Scheme) (*model.Scheme, *model.AppError) { + if len(scheme.Id) != 0 { + defer s.rootStore.doInvalidateCacheCluster(s.rootStore.schemeCache, scheme.Id) + } + return s.SchemeStore.Save(scheme) +} + +func (s LocalCacheSchemeStore) Get(schemeId string) (*model.Scheme, *model.AppError) { + if scheme := s.rootStore.doStandardReadCache(s.rootStore.schemeCache, schemeId); scheme != nil { + return scheme.(*model.Scheme), nil + } + + scheme, err := s.SchemeStore.Get(schemeId) + if err != nil { + return nil, err + } + + s.rootStore.doStandardAddToCache(s.rootStore.schemeCache, schemeId, scheme) + + return scheme, nil +} + +func (s LocalCacheSchemeStore) Delete(schemeId string) (*model.Scheme, *model.AppError) { + defer s.rootStore.doInvalidateCacheCluster(s.rootStore.schemeCache, schemeId) + defer s.rootStore.doClearCacheCluster(s.rootStore.roleCache) + + return s.SchemeStore.Delete(schemeId) +} + +func (s LocalCacheSchemeStore) PermanentDeleteAll() *model.AppError { + defer s.rootStore.doClearCacheCluster(s.rootStore.schemeCache) + defer s.rootStore.doClearCacheCluster(s.rootStore.roleCache) + + return s.SchemeStore.PermanentDeleteAll() +} diff --git a/store/localcachelayer/scheme_layer_test.go b/store/localcachelayer/scheme_layer_test.go new file mode 100644 index 0000000000..e592358a8d --- /dev/null +++ b/store/localcachelayer/scheme_layer_test.go @@ -0,0 +1,69 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. 
+// See License.txt for license information. + +package localcachelayer + +import ( + "testing" + + "github.com/mattermost/mattermost-server/model" + "github.com/mattermost/mattermost-server/store/storetest" + "github.com/mattermost/mattermost-server/store/storetest/mocks" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSchemeStore(t *testing.T) { + StoreTest(t, storetest.TestSchemeStore) +} + +func TestSchemeStoreCache(t *testing.T) { + fakeScheme := model.Scheme{Id: "123", Name: "scheme-name"} + + t.Run("first call not cached, second cached and returning same data", func(t *testing.T) { + mockStore := getMockStore() + cachedStore := NewLocalCacheLayer(mockStore, nil, nil) + + scheme, err := cachedStore.Scheme().Get("123") + require.Nil(t, err) + assert.Equal(t, scheme, &fakeScheme) + mockStore.Scheme().(*mocks.SchemeStore).AssertNumberOfCalls(t, "Get", 1) + require.Nil(t, err) + assert.Equal(t, scheme, &fakeScheme) + cachedStore.Scheme().Get("123") + mockStore.Scheme().(*mocks.SchemeStore).AssertNumberOfCalls(t, "Get", 1) + }) + + t.Run("first call not cached, save, and then not cached again", func(t *testing.T) { + mockStore := getMockStore() + cachedStore := NewLocalCacheLayer(mockStore, nil, nil) + + cachedStore.Scheme().Get("123") + mockStore.Scheme().(*mocks.SchemeStore).AssertNumberOfCalls(t, "Get", 1) + cachedStore.Scheme().Save(&fakeScheme) + cachedStore.Scheme().Get("123") + mockStore.Scheme().(*mocks.SchemeStore).AssertNumberOfCalls(t, "Get", 2) + }) + + t.Run("first call not cached, delete, and then not cached again", func(t *testing.T) { + mockStore := getMockStore() + cachedStore := NewLocalCacheLayer(mockStore, nil, nil) + + cachedStore.Scheme().Get("123") + mockStore.Scheme().(*mocks.SchemeStore).AssertNumberOfCalls(t, "Get", 1) + cachedStore.Scheme().Delete("123") + cachedStore.Scheme().Get("123") + mockStore.Scheme().(*mocks.SchemeStore).AssertNumberOfCalls(t, "Get", 2) + }) + + t.Run("first call not 
cached, permanent delete all, and then not cached again", func(t *testing.T) { + mockStore := getMockStore() + cachedStore := NewLocalCacheLayer(mockStore, nil, nil) + + cachedStore.Scheme().Get("123") + mockStore.Scheme().(*mocks.SchemeStore).AssertNumberOfCalls(t, "Get", 1) + cachedStore.Scheme().PermanentDeleteAll() + cachedStore.Scheme().Get("123") + mockStore.Scheme().(*mocks.SchemeStore).AssertNumberOfCalls(t, "Get", 2) + }) +} diff --git a/store/redis_supplier_roles.go b/store/redis_supplier_roles.go deleted file mode 100644 index 8b7175b86f..0000000000 --- a/store/redis_supplier_roles.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. - -package store - -import ( - "context" - "fmt" - - "github.com/mattermost/mattermost-server/mlog" - "github.com/mattermost/mattermost-server/model" -) - -func (s *RedisSupplier) RoleSave(ctx context.Context, role *model.Role, hints ...LayeredStoreHint) (*model.Role, *model.AppError) { - key := buildRedisKeyForRoleName(role.Name) - - defer func() { - if err := s.client.Del(key).Err(); err != nil { - mlog.Error("Redis failed to remove key " + key + " Error: " + err.Error()) - } - }() - - return s.Next().RoleSave(ctx, role, hints...) -} - -func (s *RedisSupplier) RoleGet(ctx context.Context, roleId string, hints ...LayeredStoreHint) (*model.Role, *model.AppError) { - // Roles are cached by name, as that is most commonly how they are looked up. - // This means that no caching is supported on roles being looked up by ID. - return s.Next().RoleGet(ctx, roleId, hints...) -} - -func (s *RedisSupplier) RoleGetAll(ctx context.Context, hints ...LayeredStoreHint) ([]*model.Role, *model.AppError) { - // Roles are cached by name, as that is most commonly how they are looked up. - // This means that no caching is supported on roles being listed. - return s.Next().RoleGetAll(ctx, hints...) 
-} - -func (s *RedisSupplier) RoleGetByName(ctx context.Context, name string, hints ...LayeredStoreHint) (*model.Role, *model.AppError) { - key := buildRedisKeyForRoleName(name) - - var role *model.Role - found, err := s.load(key, &role) - if err != nil { - mlog.Error("Redis encountered an error on read: " + err.Error()) - } else if found { - return role, nil - } - - role, appErr := s.Next().RoleGetByName(ctx, name, hints...) - - if appErr == nil { - if err := s.save(key, role, REDIS_EXPIRY_TIME); err != nil { - mlog.Error("Redis encountered and error on write: " + err.Error()) - } - } - - return role, appErr -} - -func (s *RedisSupplier) RoleGetByNames(ctx context.Context, roleNames []string, hints ...LayeredStoreHint) ([]*model.Role, *model.AppError) { - var foundRoles []*model.Role - var rolesToQuery []string - - for _, roleName := range roleNames { - var role *model.Role - found, err := s.load(buildRedisKeyForRoleName(roleName), &role) - if err == nil && found { - foundRoles = append(foundRoles, role) - } else { - rolesToQuery = append(rolesToQuery, roleName) - if err != nil { - mlog.Error("Redis encountered an error on read: " + err.Error()) - } - } - } - - rolesFound, appErr := s.Next().RoleGetByNames(ctx, rolesToQuery, hints...) - - if appErr == nil { - for _, role := range rolesFound { - if err := s.save(buildRedisKeyForRoleName(role.Name), role, REDIS_EXPIRY_TIME); err != nil { - mlog.Error("Redis encountered and error on write: " + err.Error()) - } - } - foundRoles = append(foundRoles, rolesFound...) - } - - return foundRoles, appErr -} - -func (s *RedisSupplier) RoleDelete(ctx context.Context, roleId string, hints ...LayeredStoreHint) (*model.Role, *model.AppError) { - role, appErr := s.Next().RoleGet(ctx, roleId, hints...) 
- - if appErr == nil { - defer func() { - key := buildRedisKeyForRoleName(role.Name) - - if err := s.client.Del(key).Err(); err != nil { - mlog.Error("Redis failed to remove key " + key + " Error: " + err.Error()) - } - }() - } - - return s.Next().RoleDelete(ctx, roleId, hints...) -} - -func (s *RedisSupplier) RolePermanentDeleteAll(ctx context.Context, hints ...LayeredStoreHint) *model.AppError { - defer func() { - if keys, err := s.client.Keys("roles:*").Result(); err != nil { - mlog.Error("Redis encountered an error on read: " + err.Error()) - } else { - if err := s.client.Del(keys...).Err(); err != nil { - mlog.Error("Redis encountered an error on delete: " + err.Error()) - } - } - }() - - return s.Next().RolePermanentDeleteAll(ctx, hints...) -} - -func buildRedisKeyForRoleName(roleName string) string { - return fmt.Sprintf("roles:%s", roleName) -} diff --git a/store/redis_supplier_schemes.go b/store/redis_supplier_schemes.go deleted file mode 100644 index 81839197d4..0000000000 --- a/store/redis_supplier_schemes.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (c) 2018-present Mattermost, Inc. All Rights Reserved. -// See License.txt for license information. - -package store - -import ( - "context" - - "github.com/mattermost/mattermost-server/model" -) - -func (s *RedisSupplier) SchemeSave(ctx context.Context, scheme *model.Scheme, hints ...LayeredStoreHint) (*model.Scheme, *model.AppError) { - // TODO: Redis caching. - return s.Next().SchemeSave(ctx, scheme, hints...) -} - -func (s *RedisSupplier) SchemeGet(ctx context.Context, schemeId string, hints ...LayeredStoreHint) (*model.Scheme, *model.AppError) { - // TODO: Redis caching. - return s.Next().SchemeGet(ctx, schemeId, hints...) -} - -func (s *RedisSupplier) SchemeGetByName(ctx context.Context, schemeName string, hints ...LayeredStoreHint) (*model.Scheme, *model.AppError) { - // TODO: Redis caching. - return s.Next().SchemeGetByName(ctx, schemeName, hints...) 
-} - -func (s *RedisSupplier) SchemeDelete(ctx context.Context, schemeId string, hints ...LayeredStoreHint) (*model.Scheme, *model.AppError) { - // TODO: Redis caching. - return s.Next().SchemeDelete(ctx, schemeId, hints...) -} - -func (s *RedisSupplier) SchemeGetAllPage(ctx context.Context, scope string, offset int, limit int, hints ...LayeredStoreHint) ([]*model.Scheme, *model.AppError) { - // TODO: Redis caching. - return s.Next().SchemeGetAllPage(ctx, scope, offset, limit, hints...) -} - -func (s *RedisSupplier) SchemePermanentDeleteAll(ctx context.Context, hints ...LayeredStoreHint) *model.AppError { - // TODO: Redis caching. - return s.Next().SchemePermanentDeleteAll(ctx, hints...) -} diff --git a/store/sqlstore/role_supplier.go b/store/sqlstore/role_supplier.go index e678fb3711..e6fc048726 100644 --- a/store/sqlstore/role_supplier.go +++ b/store/sqlstore/role_supplier.go @@ -4,7 +4,6 @@ package sqlstore import ( - "context" "database/sql" "fmt" "net/http" @@ -15,6 +14,10 @@ import ( "github.com/mattermost/mattermost-server/store" ) +type SqlRoleStore struct { + SqlStore +} + type Role struct { Id string Name string @@ -68,7 +71,9 @@ func (role Role) ToModel() *model.Role { } } -func initSqlSupplierRoles(sqlStore SqlStore) { +func NewSqlRoleStore(sqlStore SqlStore) store.RoleStore { + s := &SqlRoleStore{sqlStore} + for _, db := range sqlStore.GetAllConns() { table := db.AddTableWithName(Role{}, "Roles").SetKeys(false, "Id") table.ColMap("Id").SetMaxSize(26) @@ -77,9 +82,13 @@ func initSqlSupplierRoles(sqlStore SqlStore) { table.ColMap("Description").SetMaxSize(1024) table.ColMap("Permissions").SetMaxSize(4096) } + return s } -func (s *SqlSupplier) RoleSave(ctx context.Context, role *model.Role, hints ...store.LayeredStoreHint) (*model.Role, *model.AppError) { +func (s SqlRoleStore) CreateIndexesIfNotExists() { +} + +func (s *SqlRoleStore) Save(role *model.Role) (*model.Role, *model.AppError) { // Check the role is valid before proceeding. 
if !role.IsValidWithoutId() { return nil, model.NewAppError("SqlRoleStore.Save", "store.sql_role.save.invalid_role.app_error", nil, "", http.StatusBadRequest) @@ -91,7 +100,7 @@ func (s *SqlSupplier) RoleSave(ctx context.Context, role *model.Role, hints ...s return nil, model.NewAppError("SqlRoleStore.RoleSave", "store.sql_role.save.open_transaction.app_error", nil, err.Error(), http.StatusInternalServerError) } defer finalizeTransaction(transaction) - createdRole, appErr := s.createRole(ctx, role, transaction, hints...) + createdRole, appErr := s.createRole(role, transaction) if appErr != nil { transaction.Rollback() return nil, appErr @@ -112,7 +121,7 @@ func (s *SqlSupplier) RoleSave(ctx context.Context, role *model.Role, hints ...s return dbRole.ToModel(), nil } -func (s *SqlSupplier) createRole(ctx context.Context, role *model.Role, transaction *gorp.Transaction, hints ...store.LayeredStoreHint) (*model.Role, *model.AppError) { +func (s *SqlRoleStore) createRole(role *model.Role, transaction *gorp.Transaction) (*model.Role, *model.AppError) { // Check the role is valid before proceeding. 
if !role.IsValidWithoutId() { return nil, model.NewAppError("SqlRoleStore.Save", "store.sql_role.save.invalid_role.app_error", nil, "", http.StatusBadRequest) @@ -131,7 +140,7 @@ func (s *SqlSupplier) createRole(ctx context.Context, role *model.Role, transact return dbRole.ToModel(), nil } -func (s *SqlSupplier) RoleGet(ctx context.Context, roleId string, hints ...store.LayeredStoreHint) (*model.Role, *model.AppError) { +func (s *SqlRoleStore) Get(roleId string) (*model.Role, *model.AppError) { var dbRole Role if err := s.GetReplica().SelectOne(&dbRole, "SELECT * from Roles WHERE Id = :Id", map[string]interface{}{"Id": roleId}); err != nil { @@ -144,7 +153,7 @@ func (s *SqlSupplier) RoleGet(ctx context.Context, roleId string, hints ...store return dbRole.ToModel(), nil } -func (s *SqlSupplier) RoleGetAll(ctx context.Context, hints ...store.LayeredStoreHint) ([]*model.Role, *model.AppError) { +func (s *SqlRoleStore) GetAll() ([]*model.Role, *model.AppError) { var dbRoles []Role if _, err := s.GetReplica().Select(&dbRoles, "SELECT * from Roles", map[string]interface{}{}); err != nil { @@ -161,7 +170,7 @@ func (s *SqlSupplier) RoleGetAll(ctx context.Context, hints ...store.LayeredStor return roles, nil } -func (s *SqlSupplier) RoleGetByName(ctx context.Context, name string, hints ...store.LayeredStoreHint) (*model.Role, *model.AppError) { +func (s *SqlRoleStore) GetByName(name string) (*model.Role, *model.AppError) { var dbRole Role if err := s.GetReplica().SelectOne(&dbRole, "SELECT * from Roles WHERE Name = :Name", map[string]interface{}{"Name": name}); err != nil { @@ -174,7 +183,7 @@ func (s *SqlSupplier) RoleGetByName(ctx context.Context, name string, hints ...s return dbRole.ToModel(), nil } -func (s *SqlSupplier) RoleGetByNames(ctx context.Context, names []string, hints ...store.LayeredStoreHint) ([]*model.Role, *model.AppError) { +func (s *SqlRoleStore) GetByNames(names []string) ([]*model.Role, *model.AppError) { var dbRoles []*Role if len(names) == 0 { @@ 
-202,7 +211,7 @@ func (s *SqlSupplier) RoleGetByNames(ctx context.Context, names []string, hints return roles, nil } -func (s *SqlSupplier) RoleDelete(ctx context.Context, roleId string, hints ...store.LayeredStoreHint) (*model.Role, *model.AppError) { +func (s *SqlRoleStore) Delete(roleId string) (*model.Role, *model.AppError) { // Get the role. var role *Role if err := s.GetReplica().SelectOne(&role, "SELECT * from Roles WHERE Id = :Id", map[string]interface{}{"Id": roleId}); err != nil { @@ -224,7 +233,7 @@ func (s *SqlSupplier) RoleDelete(ctx context.Context, roleId string, hints ...st return role.ToModel(), nil } -func (s *SqlSupplier) RolePermanentDeleteAll(ctx context.Context, hints ...store.LayeredStoreHint) *model.AppError { +func (s *SqlRoleStore) PermanentDeleteAll() *model.AppError { if _, err := s.GetMaster().Exec("DELETE FROM Roles"); err != nil { return model.NewAppError("SqlRoleStore.PermanentDeleteAll", "store.sql_role.permanent_delete_all.app_error", nil, err.Error(), http.StatusInternalServerError) } diff --git a/store/sqlstore/scheme_supplier.go b/store/sqlstore/scheme_supplier.go index f125f8b27a..f0ec1830c6 100644 --- a/store/sqlstore/scheme_supplier.go +++ b/store/sqlstore/scheme_supplier.go @@ -4,7 +4,6 @@ package sqlstore import ( - "context" "database/sql" "fmt" "net/http" @@ -16,7 +15,13 @@ import ( "github.com/mattermost/mattermost-server/store" ) -func initSqlSupplierSchemes(sqlStore SqlStore) { +type SqlSchemeStore struct { + SqlStore +} + +func NewSqlSchemeStore(sqlStore SqlStore) store.SchemeStore { + s := &SqlSchemeStore{sqlStore} + for _, db := range sqlStore.GetAllConns() { table := db.AddTableWithName(model.Scheme{}, "Schemes").SetKeys(false, "Id") table.ColMap("Id").SetMaxSize(26) @@ -31,9 +36,14 @@ func initSqlSupplierSchemes(sqlStore SqlStore) { table.ColMap("DefaultChannelUserRole").SetMaxSize(64) table.ColMap("DefaultChannelGuestRole").SetMaxSize(64) } + + return s } -func (s *SqlSupplier) SchemeSave(ctx context.Context, 
scheme *model.Scheme, hints ...store.LayeredStoreHint) (*model.Scheme, *model.AppError) { +func (s SqlSchemeStore) CreateIndexesIfNotExists() { +} + +func (s *SqlSchemeStore) Save(scheme *model.Scheme) (*model.Scheme, *model.AppError) { if len(scheme.Id) == 0 { transaction, err := s.GetMaster().Begin() if err != nil { @@ -41,7 +51,7 @@ func (s *SqlSupplier) SchemeSave(ctx context.Context, scheme *model.Scheme, hint } defer finalizeTransaction(transaction) - newScheme, appErr := s.createScheme(ctx, scheme, transaction, hints...) + newScheme, appErr := s.createScheme(scheme, transaction) if appErr != nil { return nil, appErr } @@ -68,11 +78,11 @@ func (s *SqlSupplier) SchemeSave(ctx context.Context, scheme *model.Scheme, hint return scheme, nil } -func (s *SqlSupplier) createScheme(ctx context.Context, scheme *model.Scheme, transaction *gorp.Transaction, hints ...store.LayeredStoreHint) (*model.Scheme, *model.AppError) { +func (s *SqlSchemeStore) createScheme(scheme *model.Scheme, transaction *gorp.Transaction) (*model.Scheme, *model.AppError) { // Fetch the default system scheme roles to populate default permissions. 
defaultRoleNames := []string{model.TEAM_ADMIN_ROLE_ID, model.TEAM_USER_ROLE_ID, model.TEAM_GUEST_ROLE_ID, model.CHANNEL_ADMIN_ROLE_ID, model.CHANNEL_USER_ROLE_ID, model.CHANNEL_GUEST_ROLE_ID} defaultRoles := make(map[string]*model.Role) - roles, err := s.RoleGetByNames(ctx, defaultRoleNames) + roles, err := s.SqlStore.Role().GetByNames(defaultRoleNames) if err != nil { return nil, err } @@ -108,7 +118,7 @@ func (s *SqlSupplier) createScheme(ctx context.Context, scheme *model.Scheme, tr SchemeManaged: true, } - savedRole, err := s.createRole(ctx, teamAdminRole, transaction) + savedRole, err := s.SqlStore.Role().(*SqlRoleStore).createRole(teamAdminRole, transaction) if err != nil { return nil, err } @@ -122,7 +132,7 @@ func (s *SqlSupplier) createScheme(ctx context.Context, scheme *model.Scheme, tr SchemeManaged: true, } - savedRole, err = s.createRole(ctx, teamUserRole, transaction) + savedRole, err = s.SqlStore.Role().(*SqlRoleStore).createRole(teamUserRole, transaction) if err != nil { return nil, err } @@ -136,7 +146,7 @@ func (s *SqlSupplier) createScheme(ctx context.Context, scheme *model.Scheme, tr SchemeManaged: true, } - savedRole, err = s.createRole(ctx, teamGuestRole, transaction) + savedRole, err = s.SqlStore.Role().(*SqlRoleStore).createRole(teamGuestRole, transaction) if err != nil { return nil, err } @@ -151,7 +161,7 @@ func (s *SqlSupplier) createScheme(ctx context.Context, scheme *model.Scheme, tr SchemeManaged: true, } - savedRole, err := s.createRole(ctx, channelAdminRole, transaction) + savedRole, err := s.SqlStore.Role().(*SqlRoleStore).createRole(channelAdminRole, transaction) if err != nil { return nil, err } @@ -165,7 +175,7 @@ func (s *SqlSupplier) createScheme(ctx context.Context, scheme *model.Scheme, tr SchemeManaged: true, } - savedRole, err = s.createRole(ctx, channelUserRole, transaction) + savedRole, err = s.SqlStore.Role().(*SqlRoleStore).createRole(channelUserRole, transaction) if err != nil { return nil, err } @@ -179,7 +189,7 @@ 
func (s *SqlSupplier) createScheme(ctx context.Context, scheme *model.Scheme, tr SchemeManaged: true, } - savedRole, err = s.createRole(ctx, channelGuestRole, transaction) + savedRole, err = s.SqlStore.Role().(*SqlRoleStore).createRole(channelGuestRole, transaction) if err != nil { return nil, err } @@ -205,7 +215,7 @@ func (s *SqlSupplier) createScheme(ctx context.Context, scheme *model.Scheme, tr return scheme, nil } -func (s *SqlSupplier) SchemeGet(ctx context.Context, schemeId string, hints ...store.LayeredStoreHint) (*model.Scheme, *model.AppError) { +func (s *SqlSchemeStore) Get(schemeId string) (*model.Scheme, *model.AppError) { var scheme model.Scheme if err := s.GetReplica().SelectOne(&scheme, "SELECT * from Schemes WHERE Id = :Id", map[string]interface{}{"Id": schemeId}); err != nil { if err == sql.ErrNoRows { @@ -217,7 +227,7 @@ func (s *SqlSupplier) SchemeGet(ctx context.Context, schemeId string, hints ...s return &scheme, nil } -func (s *SqlSupplier) SchemeGetByName(ctx context.Context, schemeName string, hints ...store.LayeredStoreHint) (*model.Scheme, *model.AppError) { +func (s *SqlSchemeStore) GetByName(schemeName string) (*model.Scheme, *model.AppError) { var scheme model.Scheme if err := s.GetReplica().SelectOne(&scheme, "SELECT * from Schemes WHERE Name = :Name", map[string]interface{}{"Name": schemeName}); err != nil { @@ -230,7 +240,7 @@ func (s *SqlSupplier) SchemeGetByName(ctx context.Context, schemeName string, hi return &scheme, nil } -func (s *SqlSupplier) SchemeDelete(ctx context.Context, schemeId string, hints ...store.LayeredStoreHint) (*model.Scheme, *model.AppError) { +func (s *SqlSchemeStore) Delete(schemeId string) (*model.Scheme, *model.AppError) { // Get the scheme var scheme model.Scheme if err := s.GetReplica().SelectOne(&scheme, "SELECT * from Schemes WHERE Id = :Id", map[string]interface{}{"Id": schemeId}); err != nil { @@ -290,7 +300,7 @@ func (s *SqlSupplier) SchemeDelete(ctx context.Context, schemeId string, hints . 
return &scheme, nil } -func (s *SqlSupplier) SchemeGetAllPage(ctx context.Context, scope string, offset int, limit int, hints ...store.LayeredStoreHint) ([]*model.Scheme, *model.AppError) { +func (s *SqlSchemeStore) GetAllPage(scope string, offset int, limit int) ([]*model.Scheme, *model.AppError) { var schemes []*model.Scheme scopeClause := "" @@ -305,7 +315,7 @@ func (s *SqlSupplier) SchemeGetAllPage(ctx context.Context, scope string, offset return schemes, nil } -func (s *SqlSupplier) SchemePermanentDeleteAll(ctx context.Context, hints ...store.LayeredStoreHint) *model.AppError { +func (s *SqlSchemeStore) PermanentDeleteAll() *model.AppError { if _, err := s.GetMaster().Exec("DELETE from Schemes"); err != nil { return model.NewAppError("SqlSchemeStore.PermanentDeleteAll", "store.sql_scheme.permanent_delete_all.app_error", nil, err.Error(), http.StatusInternalServerError) } diff --git a/store/sqlstore/supplier.go b/store/sqlstore/supplier.go index d6a5cf9f83..9d04cedf7b 100644 --- a/store/sqlstore/supplier.go +++ b/store/sqlstore/supplier.go @@ -152,11 +152,10 @@ func NewSqlSupplier(settings model.SqlSettings, metrics einterfaces.MetricsInter supplier.oldStores.UserTermsOfService = NewSqlUserTermsOfServiceStore(supplier) supplier.oldStores.linkMetadata = NewSqlLinkMetadataStore(supplier) supplier.oldStores.reaction = NewSqlReactionStore(supplier) + supplier.oldStores.role = NewSqlRoleStore(supplier) + supplier.oldStores.scheme = NewSqlSchemeStore(supplier) supplier.oldStores.group = NewSqlGroupStore(supplier) - initSqlSupplierRoles(supplier) - initSqlSupplierSchemes(supplier) - err := supplier.GetMaster().CreateTablesIfNotExists() if err != nil { mlog.Critical("Error creating database tables.", mlog.Err(err)) diff --git a/testlib/cluster.go b/testlib/cluster.go index 244c8e5b74..c8a75e28e4 100644 --- a/testlib/cluster.go +++ b/testlib/cluster.go @@ -48,9 +48,11 @@ func (c *FakeClusterInterface) ConfigChanged(previousConfig *model.Config, newCo } func (c 
*FakeClusterInterface) SendClearRoleCacheMessage() { - c.clusterMessageHandler(&model.ClusterMessage{ - Event: model.CLUSTER_EVENT_INVALIDATE_CACHE_FOR_ROLES, - }) + if c.clusterMessageHandler != nil { + c.clusterMessageHandler(&model.ClusterMessage{ + Event: model.CLUSTER_EVENT_INVALIDATE_CACHE_FOR_ROLES, + }) + } } func (c *FakeClusterInterface) GetPluginStatuses() (model.PluginStatuses, *model.AppError) { From 5fe50775784451e591507143d094dca89fed4e85 Mon Sep 17 00:00:00 2001 From: Nikhil Ranjan Date: Thu, 12 Sep 2019 18:55:31 +0200 Subject: [PATCH 22/53] =?UTF-8?q?MM-18257=20Converting=20to=20structured?= =?UTF-8?q?=20logging=20the=20file=20utils/i18n.=E2=80=A6=20(#12095)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- utils/i18n.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/i18n.go b/utils/i18n.go index d04221168d..fcb722dc8a 100644 --- a/utils/i18n.go +++ b/utils/i18n.go @@ -67,7 +67,7 @@ func InitTranslationsWithDir(dir string) error { func GetTranslationsBySystemLocale() (i18n.TranslateFunc, error) { locale := *settings.DefaultServerLocale if _, ok := locales[locale]; !ok { - mlog.Error(fmt.Sprintf("Failed to load system translations for '%v' attempting to fall back to '%v'", locale, model.DEFAULT_LOCALE)) + mlog.Error("Failed to load system translations for", mlog.String("locale", locale), mlog.String("attempting to fall back to default locale", model.DEFAULT_LOCALE)) locale = model.DEFAULT_LOCALE } @@ -80,7 +80,7 @@ func GetTranslationsBySystemLocale() (i18n.TranslateFunc, error) { return nil, fmt.Errorf("Failed to load system translations") } - mlog.Info(fmt.Sprintf("Loaded system translations for '%v' from '%v'", locale, locales[locale])) + mlog.Info("Loaded system translations", mlog.String("for locale", locale), mlog.String("from locale", locales[locale])) return translations, nil } From 5cf578f486e835293e56d83f11eae3cd4949c2fe Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Jes=C3=BAs=20Espino?= Date: Thu, 12 Sep 2019 21:11:05 +0200 Subject: [PATCH 23/53] Migrating to our ldap fork (#12149) --- cmd/mattermost/main.go | 2 +- go.mod | 3 ++- go.sum | 3 +++ model/config.go | 2 +- .../{go-ldap => mattermost}/ldap/.gitignore | 0 .../{go-ldap => mattermost}/ldap/.travis.yml | 0 .../{go-ldap => mattermost}/ldap/CONTRIBUTING.md | 0 .../{go-ldap => mattermost}/ldap/LICENSE | 0 .../{go-ldap => mattermost}/ldap/Makefile | 0 .../{go-ldap => mattermost}/ldap/README.md | 0 .../{go-ldap => mattermost}/ldap/add.go | 0 .../{go-ldap => mattermost}/ldap/bind.go | 0 .../{go-ldap => mattermost}/ldap/client.go | 0 .../{go-ldap => mattermost}/ldap/compare.go | 0 .../{go-ldap => mattermost}/ldap/conn.go | 0 .../{go-ldap => mattermost}/ldap/control.go | 0 .../{go-ldap => mattermost}/ldap/debug.go | 0 .../{go-ldap => mattermost}/ldap/del.go | 0 .../{go-ldap => mattermost}/ldap/dn.go | 0 .../{go-ldap => mattermost}/ldap/doc.go | 0 .../{go-ldap => mattermost}/ldap/error.go | 0 .../{go-ldap => mattermost}/ldap/filter.go | 0 .../{go-ldap => mattermost}/ldap/ldap.go | 16 ++++++++++++---- .../{go-ldap => mattermost}/ldap/moddn.go | 0 .../{go-ldap => mattermost}/ldap/modify.go | 0 .../{go-ldap => mattermost}/ldap/passwdmodify.go | 0 .../{go-ldap => mattermost}/ldap/search.go | 0 vendor/modules.txt | 4 ++-- 28 files changed, 21 insertions(+), 9 deletions(-) rename vendor/github.com/{go-ldap => mattermost}/ldap/.gitignore (100%) rename vendor/github.com/{go-ldap => mattermost}/ldap/.travis.yml (100%) rename vendor/github.com/{go-ldap => mattermost}/ldap/CONTRIBUTING.md (100%) rename vendor/github.com/{go-ldap => mattermost}/ldap/LICENSE (100%) rename vendor/github.com/{go-ldap => mattermost}/ldap/Makefile (100%) rename vendor/github.com/{go-ldap => mattermost}/ldap/README.md (100%) rename vendor/github.com/{go-ldap => mattermost}/ldap/add.go (100%) rename vendor/github.com/{go-ldap => mattermost}/ldap/bind.go (100%) rename vendor/github.com/{go-ldap => 
mattermost}/ldap/client.go (100%) rename vendor/github.com/{go-ldap => mattermost}/ldap/compare.go (100%) rename vendor/github.com/{go-ldap => mattermost}/ldap/conn.go (100%) rename vendor/github.com/{go-ldap => mattermost}/ldap/control.go (100%) rename vendor/github.com/{go-ldap => mattermost}/ldap/debug.go (100%) rename vendor/github.com/{go-ldap => mattermost}/ldap/del.go (100%) rename vendor/github.com/{go-ldap => mattermost}/ldap/dn.go (100%) rename vendor/github.com/{go-ldap => mattermost}/ldap/doc.go (100%) rename vendor/github.com/{go-ldap => mattermost}/ldap/error.go (100%) rename vendor/github.com/{go-ldap => mattermost}/ldap/filter.go (100%) rename vendor/github.com/{go-ldap => mattermost}/ldap/ldap.go (96%) rename vendor/github.com/{go-ldap => mattermost}/ldap/moddn.go (100%) rename vendor/github.com/{go-ldap => mattermost}/ldap/modify.go (100%) rename vendor/github.com/{go-ldap => mattermost}/ldap/passwdmodify.go (100%) rename vendor/github.com/{go-ldap => mattermost}/ldap/search.go (100%) diff --git a/cmd/mattermost/main.go b/cmd/mattermost/main.go index c74ca8763d..69907b0f3c 100644 --- a/cmd/mattermost/main.go +++ b/cmd/mattermost/main.go @@ -16,9 +16,9 @@ import ( // Enterprise Deps _ "github.com/dgryski/dgoogauth" - _ "github.com/go-ldap/ldap" _ "github.com/hako/durafmt" _ "github.com/hashicorp/memberlist" + _ "github.com/mattermost/ldap" _ "github.com/mattermost/rsc/qr" _ "github.com/prometheus/client_golang/prometheus" _ "github.com/prometheus/client_golang/prometheus/promhttp" diff --git a/go.mod b/go.mod index 9d678fef9e..68fb18281c 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/fortytw2/leaktest v1.3.0 // indirect github.com/fsnotify/fsnotify v1.4.7 github.com/go-gorp/gorp v2.0.0+incompatible // indirect - github.com/go-ldap/ldap v3.0.3+incompatible + github.com/go-ldap/ldap v3.0.3+incompatible // indirect github.com/go-redis/redis v6.15.2+incompatible github.com/go-sql-driver/mysql v1.4.1 github.com/golang/freetype 
v0.0.0-20170609003504-e2365dfdc4a0 @@ -45,6 +45,7 @@ require ( github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e // indirect github.com/mattermost/go-i18n v1.11.0 github.com/mattermost/gorp v2.0.1-0.20190301154413-3b31e9a39d05+incompatible + github.com/mattermost/ldap v3.0.4+incompatible github.com/mattermost/rsc v0.0.0-20160330161541-bbaefb05eaa0 github.com/mattermost/viper v1.0.4 github.com/mattn/go-runewidth v0.0.4 // indirect diff --git a/go.sum b/go.sum index ad84326b89..80832b8a78 100644 --- a/go.sum +++ b/go.sum @@ -24,6 +24,7 @@ github.com/a8m/mark v0.1.1-0.20170507133748-44f2db618845/go.mod h1:c8Mh99Cw82nrs github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/apache/thrift v0.0.0-20180902110319-2566ecd5d999/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I= @@ -239,6 +240,8 @@ github.com/mattermost/go-i18n v1.11.0 h1:1hLKqn/ZvhZ80OekjVPGYcCrBfMz+YxNNgqS+be github.com/mattermost/go-i18n v1.11.0/go.mod h1:RyS7FDNQlzF1PsjbJWHRI35exqaKGSO9qD4iv8QjE34= github.com/mattermost/gorp v2.0.1-0.20190301154413-3b31e9a39d05+incompatible h1:FN4zK2wNig7MVVsOsGEZ+LeIq0gUcudn3LEGgbodMq8= github.com/mattermost/gorp v2.0.1-0.20190301154413-3b31e9a39d05+incompatible/go.mod h1:0kX1qa3DOpaPJyOdMLeo7TcBN0QmUszj9a/VygOhDe0= +github.com/mattermost/ldap v3.0.4+incompatible h1:SOeNnz+JNR+foQ3yHkYqijb9MLPhXN2BZP/PdX23VDU= 
+github.com/mattermost/ldap v3.0.4+incompatible/go.mod h1:b4reDCcGpBxJ4WX0f224KFY+OR0npin7or7EFpeIko4= github.com/mattermost/rsc v0.0.0-20160330161541-bbaefb05eaa0 h1:G9tL6JXRBMzjuD1kkBtcnd42kUiT6QDwxfFYu7adM6o= github.com/mattermost/rsc v0.0.0-20160330161541-bbaefb05eaa0/go.mod h1:nV5bfVpT//+B1RPD2JvRnxbkLmJEYXmRaaVl15fsXjs= github.com/mattermost/viper v1.0.4 h1:cMYOz4PhguscGSPxrSokUtib5HrG4gCpiUh27wyA3d0= diff --git a/model/config.go b/model/config.go index 9bc4376b5b..740c92ccf3 100644 --- a/model/config.go +++ b/model/config.go @@ -17,7 +17,7 @@ import ( "strings" "time" - "github.com/go-ldap/ldap" + "github.com/mattermost/ldap" ) const ( diff --git a/vendor/github.com/go-ldap/ldap/.gitignore b/vendor/github.com/mattermost/ldap/.gitignore similarity index 100% rename from vendor/github.com/go-ldap/ldap/.gitignore rename to vendor/github.com/mattermost/ldap/.gitignore diff --git a/vendor/github.com/go-ldap/ldap/.travis.yml b/vendor/github.com/mattermost/ldap/.travis.yml similarity index 100% rename from vendor/github.com/go-ldap/ldap/.travis.yml rename to vendor/github.com/mattermost/ldap/.travis.yml diff --git a/vendor/github.com/go-ldap/ldap/CONTRIBUTING.md b/vendor/github.com/mattermost/ldap/CONTRIBUTING.md similarity index 100% rename from vendor/github.com/go-ldap/ldap/CONTRIBUTING.md rename to vendor/github.com/mattermost/ldap/CONTRIBUTING.md diff --git a/vendor/github.com/go-ldap/ldap/LICENSE b/vendor/github.com/mattermost/ldap/LICENSE similarity index 100% rename from vendor/github.com/go-ldap/ldap/LICENSE rename to vendor/github.com/mattermost/ldap/LICENSE diff --git a/vendor/github.com/go-ldap/ldap/Makefile b/vendor/github.com/mattermost/ldap/Makefile similarity index 100% rename from vendor/github.com/go-ldap/ldap/Makefile rename to vendor/github.com/mattermost/ldap/Makefile diff --git a/vendor/github.com/go-ldap/ldap/README.md b/vendor/github.com/mattermost/ldap/README.md similarity index 100% rename from vendor/github.com/go-ldap/ldap/README.md 
rename to vendor/github.com/mattermost/ldap/README.md diff --git a/vendor/github.com/go-ldap/ldap/add.go b/vendor/github.com/mattermost/ldap/add.go similarity index 100% rename from vendor/github.com/go-ldap/ldap/add.go rename to vendor/github.com/mattermost/ldap/add.go diff --git a/vendor/github.com/go-ldap/ldap/bind.go b/vendor/github.com/mattermost/ldap/bind.go similarity index 100% rename from vendor/github.com/go-ldap/ldap/bind.go rename to vendor/github.com/mattermost/ldap/bind.go diff --git a/vendor/github.com/go-ldap/ldap/client.go b/vendor/github.com/mattermost/ldap/client.go similarity index 100% rename from vendor/github.com/go-ldap/ldap/client.go rename to vendor/github.com/mattermost/ldap/client.go diff --git a/vendor/github.com/go-ldap/ldap/compare.go b/vendor/github.com/mattermost/ldap/compare.go similarity index 100% rename from vendor/github.com/go-ldap/ldap/compare.go rename to vendor/github.com/mattermost/ldap/compare.go diff --git a/vendor/github.com/go-ldap/ldap/conn.go b/vendor/github.com/mattermost/ldap/conn.go similarity index 100% rename from vendor/github.com/go-ldap/ldap/conn.go rename to vendor/github.com/mattermost/ldap/conn.go diff --git a/vendor/github.com/go-ldap/ldap/control.go b/vendor/github.com/mattermost/ldap/control.go similarity index 100% rename from vendor/github.com/go-ldap/ldap/control.go rename to vendor/github.com/mattermost/ldap/control.go diff --git a/vendor/github.com/go-ldap/ldap/debug.go b/vendor/github.com/mattermost/ldap/debug.go similarity index 100% rename from vendor/github.com/go-ldap/ldap/debug.go rename to vendor/github.com/mattermost/ldap/debug.go diff --git a/vendor/github.com/go-ldap/ldap/del.go b/vendor/github.com/mattermost/ldap/del.go similarity index 100% rename from vendor/github.com/go-ldap/ldap/del.go rename to vendor/github.com/mattermost/ldap/del.go diff --git a/vendor/github.com/go-ldap/ldap/dn.go b/vendor/github.com/mattermost/ldap/dn.go similarity index 100% rename from 
vendor/github.com/go-ldap/ldap/dn.go rename to vendor/github.com/mattermost/ldap/dn.go diff --git a/vendor/github.com/go-ldap/ldap/doc.go b/vendor/github.com/mattermost/ldap/doc.go similarity index 100% rename from vendor/github.com/go-ldap/ldap/doc.go rename to vendor/github.com/mattermost/ldap/doc.go diff --git a/vendor/github.com/go-ldap/ldap/error.go b/vendor/github.com/mattermost/ldap/error.go similarity index 100% rename from vendor/github.com/go-ldap/ldap/error.go rename to vendor/github.com/mattermost/ldap/error.go diff --git a/vendor/github.com/go-ldap/ldap/filter.go b/vendor/github.com/mattermost/ldap/filter.go similarity index 100% rename from vendor/github.com/go-ldap/ldap/filter.go rename to vendor/github.com/mattermost/ldap/filter.go diff --git a/vendor/github.com/go-ldap/ldap/ldap.go b/vendor/github.com/mattermost/ldap/ldap.go similarity index 96% rename from vendor/github.com/go-ldap/ldap/ldap.go rename to vendor/github.com/mattermost/ldap/ldap.go index d7666676fe..e6155d508b 100644 --- a/vendor/github.com/go-ldap/ldap/ldap.go +++ b/vendor/github.com/mattermost/ldap/ldap.go @@ -270,10 +270,18 @@ func addRequestDescriptions(packet *ber.Packet) error { } func addDefaultLDAPResponseDescriptions(packet *ber.Packet) error { - err := GetLDAPError(packet) - packet.Children[1].Children[0].Description = "Result Code (" + LDAPResultCodeMap[err.(*Error).ResultCode] + ")" - packet.Children[1].Children[1].Description = "Matched DN (" + err.(*Error).MatchedDN + ")" - packet.Children[1].Children[2].Description = "Error Message" + resultCode := uint16(LDAPResultSuccess) + matchedDN := "" + description := "Success" + if err := GetLDAPError(packet); err != nil { + resultCode = err.(*Error).ResultCode + matchedDN = err.(*Error).MatchedDN + description = "Error Message" + } + + packet.Children[1].Children[0].Description = "Result Code (" + LDAPResultCodeMap[resultCode] + ")" + packet.Children[1].Children[1].Description = "Matched DN (" + matchedDN + ")" + 
packet.Children[1].Children[2].Description = description if len(packet.Children[1].Children) > 3 { packet.Children[1].Children[3].Description = "Referral" } diff --git a/vendor/github.com/go-ldap/ldap/moddn.go b/vendor/github.com/mattermost/ldap/moddn.go similarity index 100% rename from vendor/github.com/go-ldap/ldap/moddn.go rename to vendor/github.com/mattermost/ldap/moddn.go diff --git a/vendor/github.com/go-ldap/ldap/modify.go b/vendor/github.com/mattermost/ldap/modify.go similarity index 100% rename from vendor/github.com/go-ldap/ldap/modify.go rename to vendor/github.com/mattermost/ldap/modify.go diff --git a/vendor/github.com/go-ldap/ldap/passwdmodify.go b/vendor/github.com/mattermost/ldap/passwdmodify.go similarity index 100% rename from vendor/github.com/go-ldap/ldap/passwdmodify.go rename to vendor/github.com/mattermost/ldap/passwdmodify.go diff --git a/vendor/github.com/go-ldap/ldap/search.go b/vendor/github.com/mattermost/ldap/search.go similarity index 100% rename from vendor/github.com/go-ldap/ldap/search.go rename to vendor/github.com/mattermost/ldap/search.go diff --git a/vendor/modules.txt b/vendor/modules.txt index 7a1125ff2a..bfcff35ba2 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -22,8 +22,6 @@ github.com/disintegration/imaging github.com/dyatlov/go-opengraph/opengraph # github.com/fsnotify/fsnotify v1.4.7 github.com/fsnotify/fsnotify -# github.com/go-ldap/ldap v3.0.3+incompatible -github.com/go-ldap/ldap # github.com/go-redis/redis v6.15.2+incompatible github.com/go-redis/redis github.com/go-redis/redis/internal @@ -126,6 +124,8 @@ github.com/mattermost/go-i18n/i18n/language github.com/mattermost/go-i18n/i18n/translation # github.com/mattermost/gorp v2.0.1-0.20190301154413-3b31e9a39d05+incompatible github.com/mattermost/gorp +# github.com/mattermost/ldap v3.0.4+incompatible +github.com/mattermost/ldap # github.com/mattermost/rsc v0.0.0-20160330161541-bbaefb05eaa0 github.com/mattermost/rsc/qr github.com/mattermost/rsc/qr/coding 
From d8f9dd271d3317e64ad561a7175b3ea4f11340aa Mon Sep 17 00:00:00 2001 From: Nikhil Ranjan Date: Fri, 13 Sep 2019 02:30:41 +0200 Subject: [PATCH 24/53] =?UTF-8?q?MM-18268=20Converting=20to=20structured?= =?UTF-8?q?=20logging=20the=20file=20app/websock=E2=80=A6=20(#12143)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/websocket_router.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/app/websocket_router.go b/app/websocket_router.go index 38937d0fa4..e629bbdf4a 100644 --- a/app/websocket_router.go +++ b/app/websocket_router.go @@ -4,7 +4,6 @@ package app import ( - "fmt" "net/http" "github.com/mattermost/mattermost-server/mlog" @@ -89,7 +88,13 @@ func (wr *WebSocketRouter) ServeWebSocket(conn *WebConn, r *model.WebSocketReque } func ReturnWebSocketError(conn *WebConn, r *model.WebSocketRequest, err *model.AppError) { - mlog.Error(fmt.Sprintf("websocket routing error: seq=%v uid=%v %v [details: %v]", r.Seq, conn.UserId, err.SystemMessage(utils.T), err.DetailedError)) + mlog.Error( + "websocket routing error.", + mlog.Int64("seq", r.Seq), + mlog.String("user_id", conn.UserId), + mlog.String("system_message", err.SystemMessage(utils.T)), + mlog.Err(err), + ) err.DetailedError = "" errorResp := model.NewWebSocketError(r.Seq, err) From d9fa46e0a26d76531af930946f0637b67c36bf45 Mon Sep 17 00:00:00 2001 From: Nikhil Ranjan Date: Fri, 13 Sep 2019 03:02:14 +0200 Subject: [PATCH 25/53] =?UTF-8?q?Converting=20to=20structured=20logging=20?= =?UTF-8?q?the=20file=20store/sqlstore/post=5F=E2=80=A6=20(#12087)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- i18n/de.json | 4 ---- i18n/en.json | 4 ---- i18n/es.json | 4 ---- i18n/fr.json | 4 ---- i18n/it.json | 4 ---- i18n/ja.json | 4 ---- i18n/ko.json | 4 ---- i18n/nl.json | 4 ---- i18n/pl.json | 4 ---- i18n/pt-BR.json | 4 ---- i18n/ro.json | 4 ---- i18n/ru.json | 4 ---- i18n/tr.json | 4 ---- i18n/uk.json | 4 
---- i18n/zh-CN.json | 4 ---- i18n/zh-TW.json | 4 ---- mlog/log.go | 1 + store/sqlstore/post_store.go | 16 ++++++++-------- 18 files changed, 9 insertions(+), 72 deletions(-) diff --git a/i18n/de.json b/i18n/de.json index 92f71b20b9..9d97065f9b 100644 --- a/i18n/de.json +++ b/i18n/de.json @@ -6022,10 +6022,6 @@ "id": "store.sql_post.permanent_delete_by_user.too_many.app_error", "translation": "Konnte die zu löschenden Nachrichten für den Benutzer nicht auswählen (zu viele), bitte wiederholen." }, - { - "id": "store.sql_post.query_max_post_size.error", - "translation": "Konnte die maximal unterstützte Nachrichtengröße nicht bestimmen." - }, { "id": "store.sql_post.save.app_error", "translation": "Konnte die Nachricht nicht speichern." diff --git a/i18n/en.json b/i18n/en.json index 7a417a6d99..c2f3ffb85d 100644 --- a/i18n/en.json +++ b/i18n/en.json @@ -6354,10 +6354,6 @@ "id": "store.sql_post.permanent_delete_by_user.too_many.app_error", "translation": "Unable to select the posts to delete for the user (too many), please re-run" }, - { - "id": "store.sql_post.query_max_post_size.error", - "translation": "Unable to determine the maximum supported post size" - }, { "id": "store.sql_post.save.app_error", "translation": "Unable to save the Post" diff --git a/i18n/es.json b/i18n/es.json index 6767e50c31..81fccfe0b0 100644 --- a/i18n/es.json +++ b/i18n/es.json @@ -6022,10 +6022,6 @@ "id": "store.sql_post.permanent_delete_by_user.too_many.app_error", "translation": "No se puede seleccionar los mensajes a eliminar del usuario (son demasiados), por favor ejecuta de nuevo" }, - { - "id": "store.sql_post.query_max_post_size.error", - "translation": "No se puede determinar el tamaño máximo soportado de los mensajes" - }, { "id": "store.sql_post.save.app_error", "translation": "No se puede guardar el mensaje" diff --git a/i18n/fr.json b/i18n/fr.json index 6ca1c1d245..44c6b20707 100644 --- a/i18n/fr.json +++ b/i18n/fr.json @@ -6022,10 +6022,6 @@ "id": 
"store.sql_post.permanent_delete_by_user.too_many.app_error", "translation": "Impossible de sélectionner les messages à supprimer pour l'utilisateur (ils sont trop nombreux), veuillez relancer l'opération" }, - { - "id": "store.sql_post.query_max_post_size.error", - "translation": "Impossible de déterminer la taille maximale supportée pour les messages" - }, { "id": "store.sql_post.save.app_error", "translation": "Impossible de sauvegarder le message" diff --git a/i18n/it.json b/i18n/it.json index 80d8873cd3..fba3ccaf19 100644 --- a/i18n/it.json +++ b/i18n/it.json @@ -6022,10 +6022,6 @@ "id": "store.sql_post.permanent_delete_by_user.too_many.app_error", "translation": "Non è stato possibile selezionare le pubblicazioni da eliminare per l'utente (troppe pubblicazioni), per favore rilancia" }, - { - "id": "store.sql_post.query_max_post_size.error", - "translation": "Impossibile determinare la dimensione massima supportata per le pubblicazioni" - }, { "id": "store.sql_post.save.app_error", "translation": "Impossibile salvare la pubblicazione" diff --git a/i18n/ja.json b/i18n/ja.json index b67c5fb611..80de9a5133 100644 --- a/i18n/ja.json +++ b/i18n/ja.json @@ -6022,10 +6022,6 @@ "id": "store.sql_post.permanent_delete_by_user.too_many.app_error", "translation": "ユーザーの削除すべき投稿を選択できませんでした(数が多過ぎます)。再度実行してください" }, - { - "id": "store.sql_post.query_max_post_size.error", - "translation": "投稿サイズの最大値を定義できませんでした" - }, { "id": "store.sql_post.save.app_error", "translation": "投稿を保存できませんでした" diff --git a/i18n/ko.json b/i18n/ko.json index daffe64dde..c52bb4dbcd 100644 --- a/i18n/ko.json +++ b/i18n/ko.json @@ -6022,10 +6022,6 @@ "id": "store.sql_post.permanent_delete_by_user.too_many.app_error", "translation": "We couldn't select the posts to delete for the user (too many), please re-run" }, - { - "id": "store.sql_post.query_max_post_size.error", - "translation": "Unable to determine the maximum supported post size" - }, { "id": "store.sql_post.save.app_error", "translation": "내용을 
가져올수 없습니다." diff --git a/i18n/nl.json b/i18n/nl.json index 8ff6c9e55e..92886986a7 100644 --- a/i18n/nl.json +++ b/i18n/nl.json @@ -6022,10 +6022,6 @@ "id": "store.sql_post.permanent_delete_by_user.too_many.app_error", "translation": "We kunnen de geselecteerde berichten voor de gebruiker niet verwijderen (het zijn er te veel), probeer opnieuw" }, - { - "id": "store.sql_post.query_max_post_size.error", - "translation": "Unable to determine the maximum supported post size" - }, { "id": "store.sql_post.save.app_error", "translation": "Bericht kan niet opgehaald worden" diff --git a/i18n/pl.json b/i18n/pl.json index c63b4b4157..b1520e1f0c 100644 --- a/i18n/pl.json +++ b/i18n/pl.json @@ -6022,10 +6022,6 @@ "id": "store.sql_post.permanent_delete_by_user.too_many.app_error", "translation": "Nie można wybrać postów do usunięcia dla użytkownika (zbyt wielu), uruchom ponownie" }, - { - "id": "store.sql_post.query_max_post_size.error", - "translation": "Nie można określić maksymalnego obsługiwanego rozmiaru postu" - }, { "id": "store.sql_post.save.app_error", "translation": "Nie można zapisać wpisu" diff --git a/i18n/pt-BR.json b/i18n/pt-BR.json index e283bce960..cf6aa5c695 100644 --- a/i18n/pt-BR.json +++ b/i18n/pt-BR.json @@ -6022,10 +6022,6 @@ "id": "store.sql_post.permanent_delete_by_user.too_many.app_error", "translation": "Não foi possível selecionar as publicações para excluir do usuário (muitos), por favor, re-executar" }, - { - "id": "store.sql_post.query_max_post_size.error", - "translation": "Não foi possível determinar o tamanho máximo de publicação suportado" - }, { "id": "store.sql_post.save.app_error", "translation": "Não foi possível salvar a Publicação" diff --git a/i18n/ro.json b/i18n/ro.json index 460880b33a..0c923af58e 100644 --- a/i18n/ro.json +++ b/i18n/ro.json @@ -6022,10 +6022,6 @@ "id": "store.sql_post.permanent_delete_by_user.too_many.app_error", "translation": "Am putut selectaţi mesaje pentru a şterge pentru utilizator (prea multe), vă rugăm să 
re-rula" }, - { - "id": "store.sql_post.query_max_post_size.error", - "translation": "Noi nu a putut determina dimensiunea maximă suportate de post" - }, { "id": "store.sql_post.save.app_error", "translation": "Imposibil de salvat postarea" diff --git a/i18n/ru.json b/i18n/ru.json index 493e148ef4..22e4303529 100644 --- a/i18n/ru.json +++ b/i18n/ru.json @@ -6022,10 +6022,6 @@ "id": "store.sql_post.permanent_delete_by_user.too_many.app_error", "translation": "Не удалось выбрать для удаления посты пользователя (слишком много), пожалуйста, запустите повторно" }, - { - "id": "store.sql_post.query_max_post_size.error", - "translation": "Unable to determine the maximum supported post size" - }, { "id": "store.sql_post.save.app_error", "translation": "Не удалось удалить плагин" diff --git a/i18n/tr.json b/i18n/tr.json index 30907928c3..40615d161f 100644 --- a/i18n/tr.json +++ b/i18n/tr.json @@ -6022,10 +6022,6 @@ "id": "store.sql_post.permanent_delete_by_user.too_many.app_error", "translation": "Kullanıcının silinecek iletileri seçilemedi (çok fazla), lütfen yeniden çalıştırın" }, - { - "id": "store.sql_post.query_max_post_size.error", - "translation": "Desteklenen en büyük ileti boyutu belirlenemedi" - }, { "id": "store.sql_post.save.app_error", "translation": "İleti kaydedilemedi" diff --git a/i18n/uk.json b/i18n/uk.json index 5404fccfba..17ff5ea81d 100644 --- a/i18n/uk.json +++ b/i18n/uk.json @@ -6022,10 +6022,6 @@ "id": "store.sql_post.permanent_delete_by_user.too_many.app_error", "translation": "Не вдається вибрати повідомлення, які потрібно видалити для користувача (забагато), будь ласка, повторно запустіть" }, - { - "id": "store.sql_post.query_max_post_size.error", - "translation": "Ми не змогли визначити максимально підтримуваний розмір повідомлення" - }, { "id": "store.sql_post.save.app_error", "translation": "Не вдається зберегти публікацію" diff --git a/i18n/zh-CN.json b/i18n/zh-CN.json index 0c7c75fe39..2bbd5a6e20 100644 --- a/i18n/zh-CN.json +++ 
b/i18n/zh-CN.json @@ -6022,10 +6022,6 @@ "id": "store.sql_post.permanent_delete_by_user.too_many.app_error", "translation": "无法删除该用户被选择的信息(数量太多),请重新运行" }, - { - "id": "store.sql_post.query_max_post_size.error", - "translation": "无法判断最大支持的消息大小" - }, { "id": "store.sql_post.save.app_error", "translation": "无法保存消息" diff --git a/i18n/zh-TW.json b/i18n/zh-TW.json index 7471573f02..08f5b4235e 100644 --- a/i18n/zh-TW.json +++ b/i18n/zh-TW.json @@ -6022,10 +6022,6 @@ "id": "store.sql_post.permanent_delete_by_user.too_many.app_error", "translation": "無法選擇該使用者的訊息以刪除 (數量太多),請重新執行" }, - { - "id": "store.sql_post.query_max_post_size.error", - "translation": "無法判定最大支援的訊息大小" - }, { "id": "store.sql_post.save.app_error", "translation": "無法儲存訊息" diff --git a/mlog/log.go b/mlog/log.go index 503b140ed6..07d35a32da 100644 --- a/mlog/log.go +++ b/mlog/log.go @@ -28,6 +28,7 @@ const ( type Field = zapcore.Field var Int64 = zap.Int64 +var Int32 = zap.Int32 var Int = zap.Int var Uint32 = zap.Uint32 var String = zap.String diff --git a/store/sqlstore/post_store.go b/store/sqlstore/post_store.go index 521f64ce9e..fa994a1aaa 100644 --- a/store/sqlstore/post_store.go +++ b/store/sqlstore/post_store.go @@ -115,18 +115,18 @@ func (s *SqlPostStore) Save(post *model.Post) (*model.Post, *model.AppError) { post.Type != model.POST_ADD_TO_CHANNEL && post.Type != model.POST_REMOVE_FROM_CHANNEL && post.Type != model.POST_ADD_TO_TEAM && post.Type != model.POST_REMOVE_FROM_TEAM { if _, err := s.GetMaster().Exec("UPDATE Channels SET LastPostAt = GREATEST(:LastPostAt, LastPostAt), TotalMsgCount = TotalMsgCount + 1 WHERE Id = :ChannelId", map[string]interface{}{"LastPostAt": time, "ChannelId": post.ChannelId}); err != nil { - mlog.Error(fmt.Sprintf("Error updating Channel LastPostAt: %v", err.Error())) + mlog.Error("Error updating Channel LastPostAt.", mlog.Err(err)) } } else { // don't update TotalMsgCount for unimportant messages so that the channel isn't marked as unread if _, err := 
s.GetMaster().Exec("UPDATE Channels SET LastPostAt = :LastPostAt WHERE Id = :ChannelId AND LastPostAt < :LastPostAt", map[string]interface{}{"LastPostAt": time, "ChannelId": post.ChannelId}); err != nil { - mlog.Error(fmt.Sprintf("Error updating Channel LastPostAt: %v", err.Error())) + mlog.Error("Error updating Channel LastPostAt.", mlog.Err(err)) } } if len(post.RootId) > 0 { if _, err := s.GetMaster().Exec("UPDATE Posts SET UpdateAt = :UpdateAt WHERE Id = :RootId", map[string]interface{}{"UpdateAt": time, "RootId": post.RootId}); err != nil { - mlog.Error(fmt.Sprintf("Error updating Post UpdateAt: %v", err.Error())) + mlog.Error("Error updating Post UpdateAt.", mlog.Err(err)) } } @@ -1043,7 +1043,7 @@ func (s *SqlPostStore) Search(teamId string, userId string, params *model.Search _, err := s.GetSearchReplica().Select(&posts, searchQuery, queryParams) if err != nil { - mlog.Warn(fmt.Sprintf("Query error searching posts: %v", err.Error())) + mlog.Warn("Query error searching posts.", mlog.Err(err)) // Don't return the error to the caller as it is of no use to the user. Instead return an empty set of search results. 
} else { for _, p := range posts { @@ -1232,7 +1232,7 @@ func (s *SqlPostStore) GetPostsByIds(postIds []string) ([]*model.Post, *model.Ap _, err := s.GetReplica().Select(&posts, query, params) if err != nil { - mlog.Error(fmt.Sprint(err)) + mlog.Error("Query error getting posts.", mlog.Err(err)) return nil, model.NewAppError("SqlPostStore.GetPostsByIds", "store.sql_post.get_posts_by_ids.app_error", nil, "", http.StatusInternalServerError) } return posts, nil @@ -1321,7 +1321,7 @@ func (s *SqlPostStore) determineMaxPostSize() int { table_name = 'posts' AND column_name = 'message' `); err != nil { - mlog.Error(utils.T("store.sql_post.query_max_post_size.error") + err.Error()) + mlog.Error("Unable to determine the maximum supported post size", mlog.Err(err)) } } else if s.DriverName() == model.DATABASE_DRIVER_MYSQL { // The Post.Message column in MySQL has historically been TEXT, with a maximum @@ -1337,7 +1337,7 @@ func (s *SqlPostStore) determineMaxPostSize() int { AND column_name = 'Message' LIMIT 0, 1 `); err != nil { - mlog.Error(utils.T("store.sql_post.query_max_post_size.error") + err.Error()) + mlog.Error("Unable to determine the maximum supported post size", mlog.Err(err)) } } else { mlog.Warn("No implementation found to determine the maximum supported post size") @@ -1353,7 +1353,7 @@ func (s *SqlPostStore) determineMaxPostSize() int { maxPostSize = model.POST_MESSAGE_MAX_RUNES_V1 } - mlog.Info(fmt.Sprintf("Post.Message supports at most %d characters (%d bytes)", maxPostSize, maxPostSizeBytes)) + mlog.Info("Post.Message has size restrictions", mlog.Int("max_characters", maxPostSize), mlog.Int32("max_bytes", maxPostSizeBytes)) return maxPostSize } From 9f54e8267f0c7370cea6ff2436e0b8846a333a10 Mon Sep 17 00:00:00 2001 From: Nikhil Ranjan Date: Fri, 13 Sep 2019 07:31:59 +0200 Subject: [PATCH 26/53] Converting to structured logging the file app/webhook.go (#12142) --- app/webhook.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git 
a/app/webhook.go b/app/webhook.go index ae7491ce5a..5e0131c3a1 100644 --- a/app/webhook.go +++ b/app/webhook.go @@ -4,7 +4,6 @@ package app import ( - "fmt" "io" "net/http" "regexp" @@ -108,7 +107,7 @@ func (a *App) TriggerWebhook(payload *model.OutgoingWebhookPayload, hook *model. a.Srv.Go(func() { webhookResp, err := a.doOutgoingWebhookRequest(url, body, contentType) if err != nil { - mlog.Error(fmt.Sprintf("Event POST failed, err=%s", err.Error())) + mlog.Error("Event POST failed.", mlog.Err(err)) return } @@ -139,7 +138,7 @@ func (a *App) TriggerWebhook(payload *model.OutgoingWebhookPayload, hook *model. webhookResp.IconURL = hook.IconURL } if _, err := a.CreateWebhookPost(hook.CreatorId, channel, text, webhookResp.Username, webhookResp.IconURL, "", webhookResp.Props, webhookResp.Type, postRootId); err != nil { - mlog.Error(fmt.Sprintf("Failed to create response post, err=%v", err)) + mlog.Error("Failed to create response post.", mlog.Err(err)) } } }) From 4a3d7b93891abe3c0ab04219bc29019a57bb1294 Mon Sep 17 00:00:00 2001 From: Nikhil Ranjan Date: Fri, 13 Sep 2019 10:34:48 +0200 Subject: [PATCH 27/53] MM-18258 Converting to structured logging the file utils/html.go (#12134) * Converting to structured logging the file utils/html.go * using any instead of type assertion --- utils/html.go | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/utils/html.go b/utils/html.go index a3512c5a0a..fbf88730fa 100644 --- a/utils/html.go +++ b/utils/html.go @@ -6,7 +6,6 @@ package utils import ( "bytes" "errors" - "fmt" "html/template" "io" "path/filepath" @@ -28,7 +27,7 @@ type HTMLTemplateWatcher struct { func NewHTMLTemplateWatcher(directory string) (*HTMLTemplateWatcher, error) { templatesDir, _ := fileutils.FindDir(directory) - mlog.Debug(fmt.Sprintf("Parsing server templates at %v", templatesDir)) + mlog.Debug("Parsing server templates", mlog.String("templates_directory", templatesDir)) ret := &HTMLTemplateWatcher{ stop: make(chan 
struct{}), @@ -60,15 +59,15 @@ func NewHTMLTemplateWatcher(directory string) (*HTMLTemplateWatcher, error) { return case event := <-watcher.Events: if event.Op&fsnotify.Write == fsnotify.Write { - mlog.Info(fmt.Sprintf("Re-parsing templates because of modified file %v", event.Name)) + mlog.Info("Re-parsing templates because of modified file", mlog.String("file_name", event.Name)) if htmlTemplates, err := template.ParseGlob(filepath.Join(templatesDir, "*.html")); err != nil { - mlog.Error(fmt.Sprintf("Failed to parse templates %v", err)) + mlog.Error("Failed to parse templates.", mlog.Err(err)) } else { ret.templates.Store(htmlTemplates) } } case err := <-watcher.Errors: - mlog.Error(fmt.Sprintf("Failed in directory watcher %s", err)) + mlog.Error("Failed in directory watcher", mlog.Err(err)) } } }() @@ -113,7 +112,7 @@ func (t *HTMLTemplate) RenderToWriter(w io.Writer) error { } if err := t.Templates.ExecuteTemplate(w, t.TemplateName, t); err != nil { - mlog.Error(fmt.Sprintf("Error rendering template %v err=%v", t.TemplateName, err)) + mlog.Error("Error rendering template", mlog.String("template_name", t.TemplateName), mlog.Err(err)) return err } @@ -140,7 +139,11 @@ func escapeForHtml(arg interface{}) interface{} { } return safeArg default: - mlog.Warn(fmt.Sprintf("Unable to escape value for HTML template %v of type %v", arg, reflect.ValueOf(arg).Type())) + mlog.Warn( + "Unable to escape value for HTML template", + mlog.Any("html_template", arg), + mlog.String("template_type", reflect.ValueOf(arg).Type().String()), + ) return "" } } From 738a948e45cb4b50d4fad6b17a524f15f7c37357 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jes=C3=BAs=20Espino?= Date: Fri, 13 Sep 2019 10:57:48 +0200 Subject: [PATCH 28/53] Restricting groupmsg command to only allow to create group chats with know people (#12148) * Restricting groupmsg command to only allow to create group chats with know people * More generic response to the users about user he can't see * Making the code more clean --- 
app/command_groupmsg.go | 26 +++++++--- app/command_groupmsg_test.go | 95 ++++++++++++++++++++++-------------- 2 files changed, 77 insertions(+), 44 deletions(-) diff --git a/app/command_groupmsg.go b/app/command_groupmsg.go index 7c67f9ae37..71f40d4664 100644 --- a/app/command_groupmsg.go +++ b/app/command_groupmsg.go @@ -47,14 +47,26 @@ func (me *groupmsgProvider) DoCommand(a *App, args *model.CommandArgs, message s for _, username := range users { username = strings.TrimSpace(username) username = strings.TrimPrefix(username, "@") - if targetUser, err := a.Srv.Store.User().GetByUsername(username); err != nil { + targetUser, err := a.Srv.Store.User().GetByUsername(username) + if err != nil { invalidUsernames = append(invalidUsernames, username) - } else { - _, exists := targetUsers[targetUser.Id] - if !exists && targetUser.Id != args.UserId { - targetUsers[targetUser.Id] = targetUser - targetUsersSlice = append(targetUsersSlice, targetUser.Id) - } + continue + } + + canSee, err := a.UserCanSeeOtherUser(args.UserId, targetUser.Id) + if err != nil { + return &model.CommandResponse{Text: args.T("api.command_groupmsg.fail.app_error"), ResponseType: model.COMMAND_RESPONSE_TYPE_EPHEMERAL} + } + + if !canSee { + invalidUsernames = append(invalidUsernames, username) + continue + } + + _, exists := targetUsers[targetUser.Id] + if !exists && targetUser.Id != args.UserId { + targetUsers[targetUser.Id] = targetUser + targetUsersSlice = append(targetUsersSlice, targetUser.Id) } } diff --git a/app/command_groupmsg_test.go b/app/command_groupmsg_test.go index 571c5641d8..5de4017058 100644 --- a/app/command_groupmsg_test.go +++ b/app/command_groupmsg_test.go @@ -52,46 +52,67 @@ func TestGroupMsgProvider(t *testing.T) { th.LinkUserToTeam(th.BasicUser, team) cmd := &groupmsgProvider{} - // Check without permission to create a GM channel. 
- resp := cmd.DoCommand(th.App, &model.CommandArgs{ - T: i18n.IdentityTfunc(), - SiteURL: "http://test.url", - TeamId: team.Id, - UserId: th.BasicUser.Id, - Session: model.Session{ - Roles: "", - }, - }, targetUsers+"hello") + t.Run("Check without permission to create a GM channel.", func(t *testing.T) { + resp := cmd.DoCommand(th.App, &model.CommandArgs{ + T: i18n.IdentityTfunc(), + SiteURL: "http://test.url", + TeamId: team.Id, + UserId: th.BasicUser.Id, + Session: model.Session{ + Roles: "", + }, + }, targetUsers+"hello") - channelName := model.GetGroupNameFromUserIds([]string{th.BasicUser.Id, th.BasicUser2.Id, user3.Id}) - assert.Equal(t, "api.command_groupmsg.permission.app_error", resp.Text) - assert.Equal(t, "", resp.GotoLocation) + assert.Equal(t, "api.command_groupmsg.permission.app_error", resp.Text) + assert.Equal(t, "", resp.GotoLocation) + }) - // Check with permission to create a GM channel. - resp = cmd.DoCommand(th.App, &model.CommandArgs{ - T: i18n.IdentityTfunc(), - SiteURL: "http://test.url", - TeamId: team.Id, - UserId: th.BasicUser.Id, - Session: model.Session{ - Roles: model.SYSTEM_USER_ROLE_ID, - }, - }, targetUsers+"hello") + t.Run("Check without permissions to view a user in the list.", func(t *testing.T) { + th.RemovePermissionFromRole(model.PERMISSION_VIEW_MEMBERS.Id, model.SYSTEM_USER_ROLE_ID) + defer th.AddPermissionToRole(model.PERMISSION_VIEW_MEMBERS.Id, model.SYSTEM_USER_ROLE_ID) + resp := cmd.DoCommand(th.App, &model.CommandArgs{ + T: i18n.IdentityTfunc(), + SiteURL: "http://test.url", + TeamId: team.Id, + UserId: th.BasicUser.Id, + Session: model.Session{ + Roles: model.SYSTEM_USER_ROLE_ID, + }, + }, targetUsers+"hello") - assert.Equal(t, "", resp.Text) - assert.Equal(t, "http://test.url/"+team.Name+"/channels/"+channelName, resp.GotoLocation) + assert.Equal(t, "api.command_groupmsg.invalid_user.app_error", resp.Text) + assert.Equal(t, "", resp.GotoLocation) + }) - // Check without permission to post to an existing GM channel. 
- resp = cmd.DoCommand(th.App, &model.CommandArgs{ - T: i18n.IdentityTfunc(), - SiteURL: "http://test.url", - TeamId: team.Id, - UserId: th.BasicUser.Id, - Session: model.Session{ - Roles: "", - }, - }, targetUsers+"hello") + t.Run("Check with permission to create a GM channel.", func(t *testing.T) { + resp := cmd.DoCommand(th.App, &model.CommandArgs{ + T: i18n.IdentityTfunc(), + SiteURL: "http://test.url", + TeamId: team.Id, + UserId: th.BasicUser.Id, + Session: model.Session{ + Roles: model.SYSTEM_USER_ROLE_ID, + }, + }, targetUsers+"hello") - assert.Equal(t, "", resp.Text) - assert.Equal(t, "http://test.url/"+team.Name+"/channels/"+channelName, resp.GotoLocation) + channelName := model.GetGroupNameFromUserIds([]string{th.BasicUser.Id, th.BasicUser2.Id, user3.Id}) + assert.Equal(t, "", resp.Text) + assert.Equal(t, "http://test.url/"+team.Name+"/channels/"+channelName, resp.GotoLocation) + }) + + t.Run("Check without permission to post to an existing GM channel.", func(t *testing.T) { + resp := cmd.DoCommand(th.App, &model.CommandArgs{ + T: i18n.IdentityTfunc(), + SiteURL: "http://test.url", + TeamId: team.Id, + UserId: th.BasicUser.Id, + Session: model.Session{ + Roles: "", + }, + }, targetUsers+"hello") + + channelName := model.GetGroupNameFromUserIds([]string{th.BasicUser.Id, th.BasicUser2.Id, user3.Id}) + assert.Equal(t, "", resp.Text) + assert.Equal(t, "http://test.url/"+team.Name+"/channels/"+channelName, resp.GotoLocation) + }) } From 995a5454114eb8567fbaffa0418078634485713b Mon Sep 17 00:00:00 2001 From: Nikhil Ranjan Date: Fri, 13 Sep 2019 11:51:46 +0200 Subject: [PATCH 29/53] Convert app/export_test.go t.Fatal calls into assert/require calls (#12184) --- app/export_test.go | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/app/export_test.go b/app/export_test.go index bd7faed3de..2e41d40da4 100644 --- a/app/export_test.go +++ b/app/export_test.go @@ -115,9 +115,8 @@ func TestDirCreationForEmoji(t *testing.T) { 
pathToDir := th.App.createDirForEmoji("test.json", "exported_emoji_test") defer os.Remove(pathToDir) - if _, err := os.Stat(pathToDir); os.IsNotExist(err) { - t.Fatal("Directory exported_emoji_test should exist") - } + _, err := os.Stat(pathToDir) + require.False(t, os.IsNotExist(err), "Directory exported_emoji_test should exist") } func TestCopyEmojiImages(t *testing.T) { @@ -147,13 +146,10 @@ func TestCopyEmojiImages(t *testing.T) { defer os.RemoveAll(filePath) copyError := th.App.copyEmojiImages(emoji.Id, emojiImagePath, pathToDir) - if copyError != nil { - t.Fatal(copyError) - } + require.Nil(t, copyError) - if _, err := os.Stat(pathToDir + "/" + emoji.Id + "/image"); os.IsNotExist(err) { - t.Fatal("File should exist ", err) - } + _, err = os.Stat(pathToDir + "/" + emoji.Id + "/image") + require.False(t, os.IsNotExist(err), "File should exist ") } func TestExportCustomEmoji(t *testing.T) { @@ -170,9 +166,8 @@ func TestExportCustomEmoji(t *testing.T) { dirNameToExportEmoji := "exported_emoji_test" defer os.RemoveAll("../" + dirNameToExportEmoji) - if err := th.App.ExportCustomEmoji(fileWriter, filePath, pathToEmojiDir, dirNameToExportEmoji); err != nil { - t.Fatal(err) - } + err = th.App.ExportCustomEmoji(fileWriter, filePath, pathToEmojiDir, dirNameToExportEmoji) + require.Nil(t, err, "should not have failed") } func TestExportAllUsers(t *testing.T) { From bff8ad995df571f065d307d7157cace11a39728e Mon Sep 17 00:00:00 2001 From: Nikhil Ranjan Date: Fri, 13 Sep 2019 15:07:00 +0200 Subject: [PATCH 30/53] Converting to structured logging the file app/license.go (#12170) --- app/license.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/license.go b/app/license.go index a85a10bec6..fa595c6855 100644 --- a/app/license.go +++ b/app/license.go @@ -29,7 +29,7 @@ func (a *App) LoadLicense() { if license != nil { if _, err = a.SaveLicense(licenseBytes); err != nil { - mlog.Info(fmt.Sprintf("Failed to save license key loaded from disk err=%v", 
err.Error())) + mlog.Info("Failed to save license key loaded from disk.", mlog.Err(err)) } else { licenseId = license.Id } From 4c4fbdc65235b7d7c915e57b42cf6d88a4a8d390 Mon Sep 17 00:00:00 2001 From: Lev <1187448+levb@users.noreply.github.com> Date: Fri, 13 Sep 2019 06:48:32 -0700 Subject: [PATCH 31/53] Bumped Jira plugin to 2.1.3 (#12185) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 165bcd9d8b..d4b0c05a33 100644 --- a/Makefile +++ b/Makefile @@ -87,7 +87,7 @@ PLUGIN_PACKAGES += mattermost-plugin-github-v0.10.2 PLUGIN_PACKAGES += mattermost-plugin-welcomebot-v1.1.0 PLUGIN_PACKAGES += mattermost-plugin-aws-SNS-v1.0.2 PLUGIN_PACKAGES += mattermost-plugin-antivirus-v0.1.1 -PLUGIN_PACKAGES += mattermost-plugin-jira-v2.1.2 +PLUGIN_PACKAGES += mattermost-plugin-jira-v2.1.3 PLUGIN_PACKAGES += mattermost-plugin-gitlab-v1.0.0 PLUGIN_PACKAGES += mattermost-plugin-jenkins-v1.0.0 From 85ce27fc932b298ce7fae24cc79f1958cf97cd35 Mon Sep 17 00:00:00 2001 From: Miguel de la Cruz Date: Fri, 13 Sep 2019 19:58:27 +0200 Subject: [PATCH 32/53] [MM-18544] Initialise UserSearch struct before trying to decode JSON (#12198) --- model/user_search.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/model/user_search.go b/model/user_search.go index 9ea69c46e1..2a814d0fcb 100644 --- a/model/user_search.go +++ b/model/user_search.go @@ -33,14 +33,14 @@ func (u *UserSearch) ToJson() []byte { // UserSearchFromJson will decode the input and return a User func UserSearchFromJson(data io.Reader) *UserSearch { - var us *UserSearch + us := UserSearch{} json.NewDecoder(data).Decode(&us) if us.Limit == 0 { us.Limit = USER_SEARCH_DEFAULT_LIMIT } - return us + return &us } // UserSearchOptions captures internal parameters derived from the user's permissions and a From 890a7c698518c3b21f7e5054aeb9c7a9c7fc6cbf Mon Sep 17 00:00:00 2001 From: Martin Kraft Date: Fri, 13 Sep 2019 18:18:06 -0400 Subject: [PATCH 33/53] MM-18522: 
Include edited posts in compliance export. (#12197) * MM-18522: Export all post edits in compliance exports. * MM-18522: Changes ordering of export query post results. --- model/message_export.go | 1 + store/sqlstore/compliance_store.go | 5 +++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/model/message_export.go b/model/message_export.go index 96b78b4b74..834c84e9eb 100644 --- a/model/message_export.go +++ b/model/message_export.go @@ -20,6 +20,7 @@ type MessageExport struct { PostId *string PostCreateAt *int64 + PostUpdateAt *int64 PostMessage *string PostType *string PostRootId *string diff --git a/store/sqlstore/compliance_store.go b/store/sqlstore/compliance_store.go index 3ae6ef5b47..6dfcafca3f 100644 --- a/store/sqlstore/compliance_store.go +++ b/store/sqlstore/compliance_store.go @@ -212,6 +212,7 @@ func (s SqlComplianceStore) MessageExport(after int64, limit int) ([]*model.Mess `SELECT Posts.Id AS PostId, Posts.CreateAt AS PostCreateAt, + Posts.UpdateAt AS PostUpdateAt, Posts.Message AS PostMessage, Posts.Type AS PostType, Posts.OriginalId AS PostOriginalId, @@ -240,9 +241,9 @@ func (s SqlComplianceStore) MessageExport(after int64, limit int) ([]*model.Mess LEFT OUTER JOIN Users ON Posts.UserId = Users.Id LEFT JOIN Bots ON Bots.UserId = Posts.UserId WHERE - Posts.CreateAt > :StartTime AND + (Posts.CreateAt > :StartTime OR Posts.EditAt > :StartTime) AND Posts.Type = '' - ORDER BY PostCreateAt + ORDER BY PostUpdateAt LIMIT :Limit` var cposts []*model.MessageExport From 43a068543c4bf7dce06804abd4efee680130151a Mon Sep 17 00:00:00 2001 From: Ogundele Olumide Date: Sat, 14 Sep 2019 11:53:37 +0100 Subject: [PATCH 34/53] fix: migrate the test to testify (#12158) - convert all t.Fatal to testify require or assert --- app/file_test.go | 105 ++++++++++++++++++----------------------------- 1 file changed, 40 insertions(+), 65 deletions(-) diff --git a/app/file_test.go b/app/file_test.go index dc9321e054..e6263121ea 100644 --- a/app/file_test.go +++ 
b/app/file_test.go @@ -27,17 +27,12 @@ func TestGeneratePublicLinkHash(t *testing.T) { hash2 := GeneratePublicLinkHash(filename2, salt1) hash3 := GeneratePublicLinkHash(filename1, salt2) - if hash1 != GeneratePublicLinkHash(filename1, salt1) { - t.Fatal("hash should be equal for the same file name and salt") - } + hash := GeneratePublicLinkHash(filename1, salt1) + assert.Equal(t, hash, hash1, "hash should be equal for the same file name and salt") - if hash1 == hash2 { - t.Fatal("hashes for different files should not be equal") - } + assert.NotEqual(t, hash1, hash2, "hashes for different files should not be equal") - if hash1 == hash3 { - t.Fatal("hashes for the same file with different salts should not be equal") - } + assert.NotEqual(t, hash1, hash3, "hashes for the same file with different salts should not be equal") } func TestDoUploadFile(t *testing.T) { @@ -51,60 +46,44 @@ func TestDoUploadFile(t *testing.T) { data := []byte("abcd") info1, err := th.App.DoUploadFile(time.Date(2007, 2, 4, 1, 2, 3, 4, time.Local), teamId, channelId, userId, filename, data) - if err != nil { - t.Fatal(err) - } else { - defer func() { - th.App.Srv.Store.FileInfo().PermanentDelete(info1.Id) - th.App.RemoveFile(info1.Path) - }() - } + require.Nil(t, err, "DoUploadFile should succeed with valid data") + defer func() { + th.App.Srv.Store.FileInfo().PermanentDelete(info1.Id) + th.App.RemoveFile(info1.Path) + }() - if info1.Path != fmt.Sprintf("20070204/teams/%v/channels/%v/users/%v/%v/%v", teamId, channelId, userId, info1.Id, filename) { - t.Fatal("stored file at incorrect path", info1.Path) - } + value := fmt.Sprintf("20070204/teams/%v/channels/%v/users/%v/%v/%v", teamId, channelId, userId, info1.Id, filename) + assert.Equal(t, value, info1.Path, "stored file at incorrect path" ) info2, err := th.App.DoUploadFile(time.Date(2007, 2, 4, 1, 2, 3, 4, time.Local), teamId, channelId, userId, filename, data) - if err != nil { - t.Fatal(err) - } else { - defer func() { - 
th.App.Srv.Store.FileInfo().PermanentDelete(info2.Id) - th.App.RemoveFile(info2.Path) - }() - } + require.Nil(t, err, "DoUploadFile should succeed with valid data") + defer func() { + th.App.Srv.Store.FileInfo().PermanentDelete(info2.Id) + th.App.RemoveFile(info2.Path) + }() - if info2.Path != fmt.Sprintf("20070204/teams/%v/channels/%v/users/%v/%v/%v", teamId, channelId, userId, info2.Id, filename) { - t.Fatal("stored file at incorrect path", info2.Path) - } + value = fmt.Sprintf("20070204/teams/%v/channels/%v/users/%v/%v/%v", teamId, channelId, userId, info2.Id, filename) + assert.Equal(t, value, info2.Path, "stored file at incorrect path") info3, err := th.App.DoUploadFile(time.Date(2008, 3, 5, 1, 2, 3, 4, time.Local), teamId, channelId, userId, filename, data) - if err != nil { - t.Fatal(err) - } else { - defer func() { - th.App.Srv.Store.FileInfo().PermanentDelete(info3.Id) - th.App.RemoveFile(info3.Path) - }() - } + require.Nil(t, err, "DoUploadFile should succeed with valid data") + defer func() { + th.App.Srv.Store.FileInfo().PermanentDelete(info3.Id) + th.App.RemoveFile(info3.Path) + }() - if info3.Path != fmt.Sprintf("20080305/teams/%v/channels/%v/users/%v/%v/%v", teamId, channelId, userId, info3.Id, filename) { - t.Fatal("stored file at incorrect path", info3.Path) - } + value = fmt.Sprintf("20080305/teams/%v/channels/%v/users/%v/%v/%v", teamId, channelId, userId, info3.Id, filename) + assert.Equal(t, value, info3.Path, "stored file at incorrect path") info4, err := th.App.DoUploadFile(time.Date(2009, 3, 5, 1, 2, 3, 4, time.Local), "../../"+teamId, "../../"+channelId, "../../"+userId, "../../"+filename, data) - if err != nil { - t.Fatal(err) - } else { - defer func() { - th.App.Srv.Store.FileInfo().PermanentDelete(info4.Id) - th.App.RemoveFile(info4.Path) - }() - } + require.Nil(t, err, "DoUploadFile should succeed with valid data") + defer func() { + th.App.Srv.Store.FileInfo().PermanentDelete(info4.Id) + th.App.RemoveFile(info4.Path) + }() - if 
info4.Path != fmt.Sprintf("20090305/teams/%v/channels/%v/users/%v/%v/%v", teamId, channelId, userId, info4.Id, filename) { - t.Fatal("stored file at incorrect path", info4.Path) - } + value = fmt.Sprintf("20090305/teams/%v/channels/%v/users/%v/%v/%v", teamId, channelId, userId, info4.Id, filename) + assert.Equal(t, value, info4.Path, "stored file at incorrect path") } func TestUploadFile(t *testing.T) { @@ -116,19 +95,15 @@ func TestUploadFile(t *testing.T) { data := []byte("abcd") info1, err := th.App.UploadFile(data, channelId, filename) - if err != nil { - t.Fatal(err) - } else { - defer func() { - th.App.Srv.Store.FileInfo().PermanentDelete(info1.Id) - th.App.RemoveFile(info1.Path) - }() - } + require.Nil(t, err, "UploadFile should succeed with valid data") + defer func() { + th.App.Srv.Store.FileInfo().PermanentDelete(info1.Id) + th.App.RemoveFile(info1.Path) + }() - if info1.Path != fmt.Sprintf("%v/teams/noteam/channels/%v/users/nouser/%v/%v", - time.Now().Format("20060102"), channelId, info1.Id, filename) { - t.Fatal("stored file at incorrect path", info1.Path) - } + value := fmt.Sprintf("%v/teams/noteam/channels/%v/users/nouser/%v/%v", + time.Now().Format("20060102"), channelId, info1.Id, filename) + assert.Equal(t, value, info1.Path, "Stored file at incorrect path") } func TestGetInfoForFilename(t *testing.T) { From 7ecd270d7c0579aae4840df23039b5548b7a3ac9 Mon Sep 17 00:00:00 2001 From: Phillip Ahereza Date: Sun, 15 Sep 2019 00:08:04 +0300 Subject: [PATCH 35/53] migrated plugin_deadlock_test.go to use testify (#12077) --- app/plugin_deadlock_test.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/app/plugin_deadlock_test.go b/app/plugin_deadlock_test.go index f7dc05f22e..ae777e34f9 100644 --- a/app/plugin_deadlock_test.go +++ b/app/plugin_deadlock_test.go @@ -4,6 +4,7 @@ package app import ( + "github.com/stretchr/testify/require" "os" "strings" "testing" @@ -94,7 +95,7 @@ func TestPluginDeadlock(t *testing.T) { select { case <-done: 
case <-time.After(30 * time.Second): - t.Fatal("plugin failed to activate: likely deadlocked") + require.Fail(t, "plugin failed to activate: likely deadlocked") go func() { time.Sleep(5 * time.Second) os.Exit(1) @@ -201,7 +202,7 @@ func TestPluginDeadlock(t *testing.T) { select { case <-done: case <-time.After(30 * time.Second): - t.Fatal("plugin failed to activate: likely deadlocked") + require.Fail(t, "plugin failed to activate: likely deadlocked") go func() { time.Sleep(5 * time.Second) os.Exit(1) From 62eeca29b784dc5ae995951fb349b6c295b1d42a Mon Sep 17 00:00:00 2001 From: Nikhil Ranjan Date: Mon, 16 Sep 2019 13:37:14 +0200 Subject: [PATCH 36/53] Converting to structured logging the file app/slackimport.go (#12137) --- app/slackimport.go | 59 ++++++++++++++++++++++++++-------------------- 1 file changed, 34 insertions(+), 25 deletions(-) diff --git a/app/slackimport.go b/app/slackimport.go index 97bf79bd85..62730c3d08 100644 --- a/app/slackimport.go +++ b/app/slackimport.go @@ -7,7 +7,6 @@ import ( "archive/zip" "bytes" "encoding/json" - "fmt" "io" "mime/multipart" "net/http" @@ -168,7 +167,7 @@ func (a *App) SlackAddUsers(teamId string, slackusers []SlackUser, importerLog * if email == "" { email = sUser.Username + "@example.com" importerLog.WriteString(utils.T("api.slackimport.slack_add_users.missing_email_address", map[string]interface{}{"Email": email, "Username": sUser.Username})) - mlog.Warn(fmt.Sprintf("Slack Import: User %v does not have an email address in the Slack export. Used %v as a placeholder. The user should update their email address once logged in to the system.", email, sUser.Username)) + mlog.Warn("Slack Import: User does not have an email address in the Slack export. Used username as a placeholder. 
The user should update their email address once logged in to the system.", mlog.String("user_email", email), mlog.String("user_name", sUser.Username)) } password := model.NewId() @@ -246,7 +245,7 @@ func (a *App) SlackAddPosts(teamId string, channel *model.Channel, posts []Slack continue } if users[sPost.User] == nil { - mlog.Debug(fmt.Sprintf("Slack Import: Unable to add the message as the Slack user %v does not exist in Mattermost.", sPost.User)) + mlog.Debug("Slack Import: Unable to add the message as the Slack user does not exist in Mattermost.", mlog.String("user", sPost.User)) continue } newPost := model.Post{ @@ -288,7 +287,7 @@ func (a *App) SlackAddPosts(teamId string, channel *model.Channel, posts []Slack continue } if users[sPost.Comment.User] == nil { - mlog.Debug(fmt.Sprintf("Slack Import: Unable to add the message as the Slack user %v does not exist in Mattermost.", sPost.User)) + mlog.Debug("Slack Import: Unable to add the message as the Slack user does not exist in Mattermost.", mlog.String("user", sPost.User)) continue } newPost := model.Post{ @@ -333,7 +332,7 @@ func (a *App) SlackAddPosts(teamId string, channel *model.Channel, posts []Slack continue } if users[sPost.User] == nil { - mlog.Debug(fmt.Sprintf("Slack Import: Unable to add the message as the Slack user %v does not exist in Mattermost.", sPost.User)) + mlog.Debug("Slack Import: Unable to add the message as the Slack user does not exist in Mattermost.", mlog.String("user", sPost.User)) continue } @@ -361,7 +360,7 @@ func (a *App) SlackAddPosts(teamId string, channel *model.Channel, posts []Slack continue } if users[sPost.User] == nil { - mlog.Debug(fmt.Sprintf("Slack Import: Unable to add the message as the Slack user %v does not exist in Mattermost.", sPost.User)) + mlog.Debug("Slack Import: Unable to add the message as the Slack user does not exist in Mattermost.", mlog.String("user", sPost.User)) continue } newPost := model.Post{ @@ -381,7 +380,7 @@ func (a *App) SlackAddPosts(teamId 
string, channel *model.Channel, posts []Slack continue } if users[sPost.User] == nil { - mlog.Debug(fmt.Sprintf("Slack Import: Unable to add the message as the Slack user %v does not exist in Mattermost.", sPost.User)) + mlog.Debug("Slack Import: Unable to add the message as the Slack user does not exist in Mattermost.", mlog.String("user", sPost.User)) continue } newPost := model.Post{ @@ -398,7 +397,7 @@ func (a *App) SlackAddPosts(teamId string, channel *model.Channel, posts []Slack continue } if users[sPost.User] == nil { - mlog.Debug(fmt.Sprintf("Slack Import: Unable to add the message as the Slack user %v does not exist in Mattermost.", sPost.User)) + mlog.Debug("Slack Import: Unable to add the message as the Slack user does not exist in Mattermost.", mlog.String("user", sPost.User)) continue } newPost := model.Post{ @@ -415,7 +414,7 @@ func (a *App) SlackAddPosts(teamId string, channel *model.Channel, posts []Slack continue } if users[sPost.User] == nil { - mlog.Debug(fmt.Sprintf("Slack Import: Unable to add the message as the Slack user %v does not exist in Mattermost.", sPost.User)) + mlog.Debug("Slack Import: Unable to add the message as the Slack user does not exist in Mattermost.", mlog.String("user", sPost.User)) continue } newPost := model.Post{ @@ -427,7 +426,11 @@ func (a *App) SlackAddPosts(teamId string, channel *model.Channel, posts []Slack } a.OldImportPost(&newPost) default: - mlog.Warn(fmt.Sprintf("Slack Import: Unable to import the message as its type is not supported: post_type=%v, post_subtype=%v.", sPost.Type, sPost.SubType)) + mlog.Warn( + "Slack Import: Unable to import the message as its type is not supported", + mlog.String("post_type", sPost.Type), + mlog.String("post_subtype", sPost.SubType), + ) } } } @@ -439,12 +442,12 @@ func (a *App) SlackUploadFile(slackPostFile *SlackFile, uploads map[string]*zip. 
} file, ok := uploads[slackPostFile.Id] if !ok { - mlog.Warn(fmt.Sprintf("Slack Import: Unable to import file %v as the file is missing from the Slack export zip file.", slackPostFile.Id)) + mlog.Warn("Slack Import: Unable to import file as the file is missing from the Slack export zip file.", mlog.String("file_id", slackPostFile.Id)) return nil, false } openFile, err := file.Open() if err != nil { - mlog.Warn(fmt.Sprintf("Slack Import: Unable to open the file %v from the Slack export: %v.", slackPostFile.Id, err.Error())) + mlog.Warn("Slack Import: Unable to open the file from the Slack export.", mlog.String("file_id", slackPostFile.Id), mlog.Err(err)) return nil, false } defer openFile.Close() @@ -452,7 +455,7 @@ func (a *App) SlackUploadFile(slackPostFile *SlackFile, uploads map[string]*zip. timestamp := utils.TimeFromMillis(SlackConvertTimeStamp(slackTimestamp)) uploadedFile, err := a.OldImportFile(timestamp, openFile, teamId, channelId, userId, filepath.Base(file.Name)) if err != nil { - mlog.Warn(fmt.Sprintf("Slack Import: An error occurred when uploading file %v: %v.", slackPostFile.Id, err.Error())) + mlog.Warn("Slack Import: An error occurred when uploading file.", mlog.String("file_id", slackPostFile.Id), mlog.Err(err)) return nil, false } @@ -480,22 +483,22 @@ func (a *App) addSlackUsersToChannel(members []string, users map[string]*model.U func SlackSanitiseChannelProperties(channel model.Channel) model.Channel { if utf8.RuneCountInString(channel.DisplayName) > model.CHANNEL_DISPLAY_NAME_MAX_RUNES { - mlog.Warn(fmt.Sprintf("Slack Import: Channel %v display name exceeds the maximum length. It will be truncated when imported.", channel.DisplayName)) + mlog.Warn("Slack Import: Channel display name exceeds the maximum length. 
It will be truncated when imported.", mlog.String("channel_display_name", channel.DisplayName)) channel.DisplayName = truncateRunes(channel.DisplayName, model.CHANNEL_DISPLAY_NAME_MAX_RUNES) } if len(channel.Name) > model.CHANNEL_NAME_MAX_LENGTH { - mlog.Warn(fmt.Sprintf("Slack Import: Channel %v handle exceeds the maximum length. It will be truncated when imported.", channel.DisplayName)) + mlog.Warn("Slack Import: Channel handle exceeds the maximum length. It will be truncated when imported.", mlog.String("channel_display_name", channel.DisplayName)) channel.Name = channel.Name[0:model.CHANNEL_NAME_MAX_LENGTH] } if utf8.RuneCountInString(channel.Purpose) > model.CHANNEL_PURPOSE_MAX_RUNES { - mlog.Warn(fmt.Sprintf("Slack Import: Channel %v purpose exceeds the maximum length. It will be truncated when imported.", channel.DisplayName)) + mlog.Warn("Slack Import: Channel purpose exceeds the maximum length. It will be truncated when imported.", mlog.String("channel_display_name", channel.DisplayName)) channel.Purpose = truncateRunes(channel.Purpose, model.CHANNEL_PURPOSE_MAX_RUNES) } if utf8.RuneCountInString(channel.Header) > model.CHANNEL_HEADER_MAX_RUNES { - mlog.Warn(fmt.Sprintf("Slack Import: Channel %v header exceeds the maximum length. It will be truncated when imported.", channel.DisplayName)) + mlog.Warn("Slack Import: Channel header exceeds the maximum length. It will be truncated when imported.", mlog.String("channel_display_name", channel.DisplayName)) channel.Header = truncateRunes(channel.Header, model.CHANNEL_HEADER_MAX_RUNES) } @@ -540,7 +543,7 @@ func (a *App) SlackAddChannels(teamId string, slackchannels []SlackChannel, post // Haven't found an existing channel to merge with. Try importing it as a new one. 
mChannel = a.OldImportChannel(&newChannel, sChannel, users) if mChannel == nil { - mlog.Warn(fmt.Sprintf("Slack Import: Unable to import Slack channel: %s.", newChannel.DisplayName)) + mlog.Warn("Slack Import: Unable to import Slack channel.", mlog.String("channel_display_name", newChannel.DisplayName)) importerLog.WriteString(utils.T("api.slackimport.slack_add_channels.import_failed", map[string]interface{}{"DisplayName": newChannel.DisplayName})) continue } @@ -563,7 +566,7 @@ func SlackConvertUserMentions(users []SlackUser, posts map[string][]SlackPost) m for _, user := range users { r, err := regexp.Compile("<@" + user.Id + `(\|` + user.Username + ")?>") if err != nil { - mlog.Warn(fmt.Sprintf("Slack Import: Unable to compile the @mention, matching regular expression for the Slack user %v (id=%v).", user.Id, user.Username), mlog.String("user_id", user.Id)) + mlog.Warn("Slack Import: Unable to compile the @mention, matching regular expression for the Slack user.", mlog.String("user_name", user.Username), mlog.String("user_id", user.Id)) continue } regexes["@"+user.Username] = r @@ -591,7 +594,7 @@ func SlackConvertChannelMentions(channels []SlackChannel, posts map[string][]Sla for _, channel := range channels { r, err := regexp.Compile("<#" + channel.Id + `(\|` + channel.Name + ")?>") if err != nil { - mlog.Warn(fmt.Sprintf("Slack Import: Unable to compile the !channel, matching regular expression for the Slack channel %v (id=%v).", channel.Id, channel.Name)) + mlog.Warn("Slack Import: Unable to compile the !channel, matching regular expression for the Slack channel.", mlog.String("channel_id", channel.Id), mlog.String("channel_name", channel.Name)) continue } regexes["~"+channel.Name] = r @@ -783,7 +786,7 @@ func (a *App) OldImportPost(post *model.Post) string { _, err := a.Srv.Store.Post().Save(post) if err != nil { - mlog.Debug(fmt.Sprintf("Error saving post. 
user=%v, message=%v", post.UserId, post.Message)) + mlog.Debug("Error saving post.", mlog.String("user_id", post.UserId), mlog.String("message", post.Message)) } if firstIteration { @@ -792,7 +795,13 @@ func (a *App) OldImportPost(post *model.Post) string { } for _, fileId := range post.FileIds { if err := a.Srv.Store.FileInfo().AttachToPost(fileId, post.Id, post.UserId); err != nil { - mlog.Error(fmt.Sprintf("Error attaching files to post. postId=%v, fileIds=%v, message=%v", post.Id, post.FileIds, err), mlog.String("post_id", post.Id)) + mlog.Error( + "Error attaching files to post.", + mlog.String("post_id", post.Id), + mlog.String("file_ids", strings.Join(post.FileIds, ",")), + mlog.String("user_id", post.UserId), + mlog.Err(err), + ) } } post.FileIds = nil @@ -813,16 +822,16 @@ func (a *App) OldImportUser(team *model.Team, user *model.User) *model.User { ruser, err := a.Srv.Store.User().Save(user) if err != nil { - mlog.Error(fmt.Sprintf("Error saving user. err=%v", err)) + mlog.Error("Error saving user.", mlog.Err(err)) return nil } if _, err = a.Srv.Store.User().VerifyEmail(ruser.Id, ruser.Email); err != nil { - mlog.Error(fmt.Sprintf("Failed to set email verified err=%v", err)) + mlog.Error("Failed to set email verified.", mlog.Err(err)) } if err = a.JoinUserToTeam(team, user, ""); err != nil { - mlog.Error(fmt.Sprintf("Failed to join team when importing err=%v", err)) + mlog.Error("Failed to join team when importing.", mlog.Err(err)) } return ruser From 50ae252f9cf48b3d42dfaacf3b2b3573cc512582 Mon Sep 17 00:00:00 2001 From: Darrell Richards Date: Mon, 16 Sep 2019 08:28:09 -0400 Subject: [PATCH 37/53] GH-12194 Converting to structured logging the file app/user.go (#12201) * Converted Logging on app/user.go * Fixed a couple of issues with logging using the mlog.Err. * Fixed issues from feedback via PR. 
--- app/user.go | 68 ++++++++++++++++++++++++++--------------------------- 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/app/user.go b/app/user.go index 0b9f5922dd..bc5da8a814 100644 --- a/app/user.go +++ b/app/user.go @@ -95,7 +95,7 @@ func (a *App) CreateUserWithToken(user *model.User, token *model.Token) (*model. for _, channel := range channels { _, err := a.AddChannelMember(ruser.Id, channel, "", "") if err != nil { - mlog.Error(err.Error()) + mlog.Error("Failed to add channel member", mlog.Err(err)) } } } @@ -135,7 +135,7 @@ func (a *App) CreateUserWithInviteId(user *model.User, inviteId string) (*model. a.AddDirectChannels(team.Id, ruser) if err := a.SendWelcomeEmail(ruser.Id, ruser.Email, ruser.EmailVerified, ruser.Locale, a.GetSiteURL()); err != nil { - mlog.Error(err.Error()) + mlog.Error("Failed to send welcome email on create user with inviteId", mlog.Err(err)) } return ruser, nil @@ -148,7 +148,7 @@ func (a *App) CreateUserAsAdmin(user *model.User) (*model.User, *model.AppError) } if err := a.SendWelcomeEmail(ruser.Id, ruser.Email, ruser.EmailVerified, ruser.Locale, a.GetSiteURL()); err != nil { - mlog.Error(err.Error()) + mlog.Error("Failed to send welcome email on create admin user", mlog.Err(err)) } return ruser, nil @@ -172,7 +172,7 @@ func (a *App) CreateUserFromSignup(user *model.User) (*model.User, *model.AppErr } if err := a.SendWelcomeEmail(ruser.Id, ruser.Email, ruser.EmailVerified, ruser.Locale, a.GetSiteURL()); err != nil { - mlog.Error(err.Error()) + mlog.Error("Failed to send welcome email on create user from signup", mlog.Err(err)) } return ruser, nil @@ -190,7 +190,7 @@ func (a *App) IsFirstUserAccount() bool { if a.SessionCacheLength() == 0 { count, err := a.Srv.Store.User().Count(model.UserCountOptions{IncludeDeleted: true}) if err != nil { - mlog.Error(fmt.Sprint(err)) + mlog.Error("There was a error fetching if first user account", mlog.Err(err)) return false } if count <= 0 { @@ -314,19 +314,19 @@ func (a *App) 
createUser(user *model.User) (*model.User, *model.AppError) { ruser, err := a.Srv.Store.User().Save(user) if err != nil { - mlog.Error(fmt.Sprintf("Couldn't save the user err=%v", err)) + mlog.Error("Couldn't save the user", mlog.Err(err)) return nil, err } if user.EmailVerified { if err := a.VerifyUserEmail(ruser.Id, user.Email); err != nil { - mlog.Error(fmt.Sprintf("Failed to set email verified err=%v", err)) + mlog.Error("Failed to set email verified", mlog.Err(err)) } } pref := model.Preference{UserId: ruser.Id, Category: model.PREFERENCE_CATEGORY_TUTORIAL_STEPS, Name: ruser.Id, Value: "0"} if err := a.Srv.Store.Preference().Save(&model.Preferences{pref}); err != nil { - mlog.Error(fmt.Sprintf("Encountered error saving tutorial preference, err=%v", err.Message)) + mlog.Error("Encountered error saving tutorial preference", mlog.Err(err)) } ruser.Sanitize(map[string]bool{}) @@ -397,7 +397,7 @@ func (a *App) CreateOAuthUser(service string, userData io.Reader, teamId string) err = a.AddDirectChannels(teamId, user) if err != nil { - mlog.Error(err.Error()) + mlog.Error("Failed to add direct channels", mlog.Err(err)) } } @@ -835,14 +835,14 @@ func (a *App) SetDefaultProfileImage(user *model.User) *model.AppError { } if err := a.Srv.Store.User().ResetLastPictureUpdate(user.Id); err != nil { - mlog.Error(err.Error()) + mlog.Error("Failed to reset last picture update", mlog.Err(err)) } a.InvalidateCacheForUser(user.Id) updatedUser, appErr := a.GetUser(user.Id) if appErr != nil { - mlog.Error(fmt.Sprintf("Error in getting users profile for id=%v forcing logout", user.Id), mlog.String("user_id", user.Id)) + mlog.Error("Error in getting users profile forcing logout", mlog.String("user_id", user.Id), mlog.Err(appErr)) return nil } @@ -908,7 +908,7 @@ func (a *App) SetProfileImageFromFile(userId string, file io.Reader) *model.AppE } if err := a.Srv.Store.User().UpdateLastPictureUpdate(userId); err != nil { - mlog.Error(err.Error()) + mlog.Error("Error with updating last 
picture update", mlog.Err(err)) } a.invalidateUserCacheAndPublish(userId) @@ -1137,13 +1137,13 @@ func (a *App) UpdateUser(user *model.User, sendNotifications bool) (*model.User, if *a.Config().EmailSettings.RequireEmailVerification { a.Srv.Go(func() { if err := a.SendEmailVerification(userUpdate.New, newEmail); err != nil { - mlog.Error(err.Error()) + mlog.Error("Failed to send email verification", mlog.Err(err)) } }) } else { a.Srv.Go(func() { if err := a.SendEmailChangeEmail(userUpdate.Old.Email, userUpdate.New.Email, userUpdate.New.Locale, a.GetSiteURL()); err != nil { - mlog.Error(err.Error()) + mlog.Error("Failed to send email change email", mlog.Err(err)) } }) } @@ -1152,7 +1152,7 @@ func (a *App) UpdateUser(user *model.User, sendNotifications bool) (*model.User, if userUpdate.New.Username != userUpdate.Old.Username { a.Srv.Go(func() { if err := a.SendChangeUsernameEmail(userUpdate.Old.Username, userUpdate.New.Username, userUpdate.New.Email, userUpdate.New.Locale, a.GetSiteURL()); err != nil { - mlog.Error(err.Error()) + mlog.Error("Failed to send change username email", mlog.Err(err)) } }) } @@ -1214,12 +1214,12 @@ func (a *App) UpdateMfa(activate bool, userId, token string) *model.AppError { a.Srv.Go(func() { user, err := a.GetUser(userId) if err != nil { - mlog.Error(err.Error()) + mlog.Error("Failed to get user", mlog.Err(err)) return } if err := a.SendMfaChangeEmail(user.Email, activate, user.Locale, a.GetSiteURL()); err != nil { - mlog.Error(err.Error()) + mlog.Error("Failed to send mfa change email", mlog.Err(err)) } }) @@ -1256,7 +1256,7 @@ func (a *App) UpdatePasswordSendEmail(user *model.User, newPassword, method stri a.Srv.Go(func() { if err := a.SendPasswordChangeEmail(user.Email, method, user.Locale, a.GetSiteURL()); err != nil { - mlog.Error(err.Error()) + mlog.Error("Failed to send password change email", mlog.Err(err)) } }) @@ -1302,7 +1302,7 @@ func (a *App) ResetPasswordFromToken(userSuppliedTokenString, newPassword string } if err := 
a.DeleteToken(token); err != nil { - mlog.Error(err.Error()) + mlog.Error("Failed to delete token", mlog.Err(err)) } return nil @@ -1399,7 +1399,7 @@ func (a *App) UpdateUserRoles(userId string, newRoles string, sendWebSocketEvent if result := <-schan; result.Err != nil { // soft error since the user roles were still updated - mlog.Error(fmt.Sprint(result.Err)) + mlog.Error("Failed during updating user roles", mlog.Err(result.Err)) } a.ClearSessionCacheForUser(user.Id) @@ -1415,9 +1415,9 @@ func (a *App) UpdateUserRoles(userId string, newRoles string, sendWebSocketEvent } func (a *App) PermanentDeleteUser(user *model.User) *model.AppError { - mlog.Warn(fmt.Sprintf("Attempting to permanently delete account %v id=%v", user.Email, user.Id), mlog.String("user_id", user.Id)) + mlog.Warn("Attempting to permanently delete account", mlog.String("user_id", user.Id), mlog.String("user_email", user.Email)) if user.IsInRole(model.SYSTEM_ADMIN_ROLE_ID) { - mlog.Warn(fmt.Sprintf("You are deleting %v that is a system administrator. You may need to set another account as the system administrator using the command line tools.", user.Email)) + mlog.Warn("You are deleting a user that is a system administrator. 
You may need to set another account as the system administrator using the command line tools.", mlog.String("user_email", user.Email)) } if _, err := a.UpdateActive(user, false); err != nil { @@ -1466,7 +1466,7 @@ func (a *App) PermanentDeleteUser(user *model.User) *model.AppError { infos, err := a.Srv.Store.FileInfo().GetForUser(user.Id) if err != nil { - mlog.Warn("Error getting file list for user from FileInfoStore") + mlog.Warn("Error getting file list for user from FileInfoStore", mlog.Err(err)) } for _, info := range infos { @@ -1512,7 +1512,7 @@ func (a *App) PermanentDeleteUser(user *model.User) *model.AppError { return err } - mlog.Warn(fmt.Sprintf("Permanently deleted account %v id=%v", user.Email, user.Id), mlog.String("user_id", user.Id)) + mlog.Warn("Permanently deleted account", mlog.String("user_email", user.Email), mlog.String("user_id", user.Id)) if a.IsESIndexingEnabled() { a.Srv.Go(func() { @@ -1580,13 +1580,13 @@ func (a *App) VerifyEmailFromToken(userSuppliedTokenString string) *model.AppErr if user.Email != tokenData.Email { a.Srv.Go(func() { if err := a.SendEmailChangeEmail(user.Email, tokenData.Email, user.Locale, a.GetSiteURL()); err != nil { - mlog.Error(err.Error()) + mlog.Error("Failed to send email change email", mlog.Err(err)) } }) } if err := a.DeleteToken(token); err != nil { - mlog.Error(err.Error()) + mlog.Error("Failed to delete token", mlog.Err(err)) } return nil @@ -2242,13 +2242,13 @@ func (a *App) PromoteGuestToUser(user *model.User, requestorId string) *model.Ap for _, team := range userTeams { // Soft error if there is an issue joining the default channels if err = a.JoinDefaultChannels(team.Id, user, false, requestorId); err != nil { - mlog.Error(fmt.Sprintf("Encountered an issue joining default channels err=%v", err), mlog.String("user_id", user.Id), mlog.String("team_id", team.Id), mlog.String("requestor_id", requestorId)) + mlog.Error("Failed to join default channels", mlog.String("user_id", user.Id), 
mlog.String("team_id", team.Id), mlog.String("requestor_id", requestorId), mlog.Err(err)) } } promotedUser, err := a.GetUser(user.Id) if err != nil { - mlog.Error(err.Error()) + mlog.Error("Failed to get user on promote guest to user", mlog.Err(err)) } else { a.sendUpdatedUserEvent(*promotedUser) a.UpdateSessionsIsGuest(promotedUser.Id, promotedUser.IsGuest()) @@ -2256,7 +2256,7 @@ func (a *App) PromoteGuestToUser(user *model.User, requestorId string) *model.Ap teamMembers, err := a.GetTeamMembersForUser(user.Id) if err != nil { - mlog.Error(err.Error()) + mlog.Error("Failed to get team members for user on promote guest to user", mlog.Err(err)) } for _, member := range teamMembers { @@ -2264,7 +2264,7 @@ func (a *App) PromoteGuestToUser(user *model.User, requestorId string) *model.Ap channelMembers, err := a.GetChannelMembersForUser(member.TeamId, user.Id) if err != nil { - mlog.Error(err.Error()) + mlog.Error("Failed to get channel members for user on promote guest to user", mlog.Err(err)) } for _, member := range *channelMembers { @@ -2287,7 +2287,7 @@ func (a *App) DemoteUserToGuest(user *model.User) *model.AppError { demotedUser, err := a.GetUser(user.Id) if err != nil { - mlog.Error(err.Error()) + mlog.Error("Failed to get user on demote user to guest", mlog.Err(err)) } else { a.sendUpdatedUserEvent(*demotedUser) a.UpdateSessionsIsGuest(demotedUser.Id, demotedUser.IsGuest()) @@ -2295,7 +2295,7 @@ func (a *App) DemoteUserToGuest(user *model.User) *model.AppError { teamMembers, err := a.GetTeamMembersForUser(user.Id) if err != nil { - mlog.Error(err.Error()) + mlog.Error("Failed to get team members for users on demote user to guest", mlog.Err(err)) } for _, member := range teamMembers { @@ -2303,7 +2303,7 @@ func (a *App) DemoteUserToGuest(user *model.User) *model.AppError { channelMembers, err := a.GetChannelMembersForUser(member.TeamId, user.Id) if err != nil { - mlog.Error(err.Error()) + mlog.Error("Failed to get channel members for users on demote user to 
guest", mlog.Err(err)) } for _, member := range *channelMembers { @@ -2322,7 +2322,7 @@ func (a *App) invalidateUserCacheAndPublish(userId string) { user, userErr := a.GetUser(userId) if userErr != nil { - mlog.Error(fmt.Sprintf("Error in getting users profile for id=%v, err=%v", userId, userErr.Error()), mlog.String("user_id", userId)) + mlog.Error("Error in getting users profile", mlog.String("user_id", userId), mlog.Err(userErr)) return } From 6a2f09ae7888947c8f9195fcb443a3328b38d29d Mon Sep 17 00:00:00 2001 From: Ogundele Olumide Date: Mon, 16 Sep 2019 16:27:40 +0100 Subject: [PATCH 38/53] chore: migrate test to testify kit (#12230) - convert t.fatal to require.Fail --- app/post_metadata_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/post_metadata_test.go b/app/post_metadata_test.go index d9dbe85f3a..a2dcd7efc3 100644 --- a/app/post_metadata_test.go +++ b/app/post_metadata_test.go @@ -544,7 +544,7 @@ func TestGetEmbedForPost(t *testing.T) { `)) } else { - t.Fatal("Invalid path", r.URL.Path) + require.Fail(t, "Invalid path", r.URL.Path) } })) defer server.Close() From ad9784d7e3741d5311691bf0fd60318c7391d0fc Mon Sep 17 00:00:00 2001 From: Claudio Costa Date: Mon, 16 Sep 2019 19:34:36 +0200 Subject: [PATCH 39/53] [MM-16934] Add API endpoint to update channel privacy (#11993) * Add API endpoint to update channel privacy setting * Fix language files * Improve tests by making sure channel has been updated correctly * Improve tests --- api4/channel.go | 49 ++++++++++++++++++++++++++++++++++ api4/channel_test.go | 62 ++++++++++++++++++++++++++++++++++++++++++++ i18n/en.json | 4 +++ model/client4.go | 11 ++++++++ 4 files changed, 126 insertions(+) diff --git a/api4/channel.go b/api4/channel.go index 03edc36a7b..84275acc09 100644 --- a/api4/channel.go +++ b/api4/channel.go @@ -35,6 +35,7 @@ func (api *API) InitChannel() { api.BaseRoutes.Channel.Handle("", api.ApiSessionRequired(updateChannel)).Methods("PUT") 
api.BaseRoutes.Channel.Handle("/patch", api.ApiSessionRequired(patchChannel)).Methods("PUT") api.BaseRoutes.Channel.Handle("/convert", api.ApiSessionRequired(convertChannelToPrivate)).Methods("POST") + api.BaseRoutes.Channel.Handle("/privacy", api.ApiSessionRequired(updateChannelPrivacy)).Methods("PUT") api.BaseRoutes.Channel.Handle("/restore", api.ApiSessionRequired(restoreChannel)).Methods("POST") api.BaseRoutes.Channel.Handle("", api.ApiSessionRequired(deleteChannel)).Methods("DELETE") api.BaseRoutes.Channel.Handle("/stats", api.ApiSessionRequired(getChannelStats)).Methods("GET") @@ -229,6 +230,54 @@ func convertChannelToPrivate(c *Context, w http.ResponseWriter, r *http.Request) w.Write([]byte(rchannel.ToJson())) } +func updateChannelPrivacy(c *Context, w http.ResponseWriter, r *http.Request) { + c.RequireChannelId() + if c.Err != nil { + return + } + + props := model.StringInterfaceFromJson(r.Body) + privacy, ok := props["privacy"].(string) + if !ok || (privacy != model.CHANNEL_OPEN && privacy != model.CHANNEL_PRIVATE) { + c.SetInvalidParam("privacy") + return + } + + channel, err := c.App.GetChannel(c.Params.ChannelId) + if err != nil { + c.Err = err + return + } + + if !c.App.SessionHasPermissionToTeam(c.App.Session, channel.TeamId, model.PERMISSION_MANAGE_TEAM) { + c.SetPermissionError(model.PERMISSION_MANAGE_TEAM) + return + } + + if channel.Name == model.DEFAULT_CHANNEL && privacy == model.CHANNEL_PRIVATE { + c.Err = model.NewAppError("updateChannelPrivacy", "api.channel.update_channel_privacy.default_channel_error", nil, "", http.StatusBadRequest) + return + } + + user, err := c.App.GetUser(c.App.Session.UserId) + if err != nil { + c.Err = err + return + } + + channel.Type = privacy + + updatedChannel, err := c.App.UpdateChannelPrivacy(channel, user) + if err != nil { + c.Err = err + return + } + + c.LogAudit("name=" + updatedChannel.Name) + + w.Write([]byte(updatedChannel.ToJson())) +} + func patchChannel(c *Context, w http.ResponseWriter, r 
*http.Request) { c.RequireChannelId() if c.Err != nil { diff --git a/api4/channel_test.go b/api4/channel_test.go index fb7ac11d15..8ff7a96753 100644 --- a/api4/channel_test.go +++ b/api4/channel_test.go @@ -1387,6 +1387,68 @@ func TestConvertChannelToPrivate(t *testing.T) { } } +func TestUpdateChannelPrivacy(t *testing.T) { + th := Setup().InitBasic() + defer th.TearDown() + Client := th.Client + + type testTable []struct { + name string + channel *model.Channel + expectedPrivacy string + } + + defaultChannel, _ := th.App.GetChannelByName(model.DEFAULT_CHANNEL, th.BasicTeam.Id, false) + privateChannel := th.CreatePrivateChannel() + publicChannel := th.CreatePublicChannel() + + tt := testTable{ + {"Updating default channel should fail with forbidden status if not logged in", defaultChannel, model.CHANNEL_OPEN}, + {"Updating private channel should fail with forbidden status if not logged in", privateChannel, model.CHANNEL_PRIVATE}, + {"Updating public channel should fail with forbidden status if not logged in", publicChannel, model.CHANNEL_OPEN}, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + _, resp := Client.UpdateChannelPrivacy(tc.channel.Id, tc.expectedPrivacy) + CheckForbiddenStatus(t, resp) + }) + } + + th.LoginTeamAdmin() + + tt = testTable{ + {"Converting default channel to private should fail", defaultChannel, model.CHANNEL_PRIVATE}, + {"Updating privacy to an invalid setting should fail", publicChannel, "invalid"}, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + _, resp := Client.UpdateChannelPrivacy(tc.channel.Id, tc.expectedPrivacy) + CheckBadRequestStatus(t, resp) + }) + } + + tt = testTable{ + {"Default channel should stay public", defaultChannel, model.CHANNEL_OPEN}, + {"Public channel should stay public", publicChannel, model.CHANNEL_OPEN}, + {"Private channel should stay private", privateChannel, model.CHANNEL_PRIVATE}, + {"Public channel should convert to private", publicChannel, 
model.CHANNEL_PRIVATE}, + {"Private channel should convert to public", privateChannel, model.CHANNEL_OPEN}, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + updatedChannel, resp := Client.UpdateChannelPrivacy(tc.channel.Id, tc.expectedPrivacy) + CheckNoError(t, resp) + assert.Equal(t, updatedChannel.Type, tc.expectedPrivacy) + updatedChannel, err := th.App.GetChannel(tc.channel.Id) + require.Nil(t, err) + assert.Equal(t, updatedChannel.Type, tc.expectedPrivacy) + }) + } +} + func TestRestoreChannel(t *testing.T) { th := Setup().InitBasic() defer th.TearDown() diff --git a/i18n/en.json b/i18n/en.json index c2f3ffb85d..2376e75418 100644 --- a/i18n/en.json +++ b/i18n/en.json @@ -387,6 +387,10 @@ "id": "api.channel.update_channel_member_roles.scheme_role.app_error", "translation": "The provided role is managed by a Scheme and therefore cannot be applied directly to a Channel Member" }, + { + "id": "api.channel.update_channel_privacy.default_channel_error", + "translation": "The default channel cannot be made private." + }, { "id": "api.channel.update_channel_scheme.license.error", "translation": "Your license does not support updating a channel's scheme" diff --git a/model/client4.go b/model/client4.go index a81696597c..aac5fd4bf3 100644 --- a/model/client4.go +++ b/model/client4.go @@ -2100,6 +2100,17 @@ func (c *Client4) ConvertChannelToPrivate(channelId string) (*Channel, *Response return ChannelFromJson(r.Body), BuildResponse(r) } +// UpdateChannelPrivacy updates channel privacy +func (c *Client4) UpdateChannelPrivacy(channelId string, privacy string) (*Channel, *Response) { + requestBody := map[string]string{"privacy": privacy} + r, err := c.DoApiPut(c.GetChannelRoute(channelId)+"/privacy", MapToJson(requestBody)) + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + return ChannelFromJson(r.Body), BuildResponse(r) +} + // RestoreChannel restores a previously deleted channel. 
Any missing fields are not updated. func (c *Client4) RestoreChannel(channelId string) (*Channel, *Response) { r, err := c.DoApiPost(c.GetChannelRoute(channelId)+"/restore", "") From fe4127272372e954fa4410d7bef722c6592cd12b Mon Sep 17 00:00:00 2001 From: Arshdeep Singh Chimni Date: Tue, 17 Sep 2019 00:48:28 +0530 Subject: [PATCH 40/53] migrate "app/diagnostics_test.go" to use testify (#12072) --- app/diagnostics_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/app/diagnostics_test.go b/app/diagnostics_test.go index b0e13b1be8..0d70beef0e 100644 --- a/app/diagnostics_test.go +++ b/app/diagnostics_test.go @@ -68,7 +68,7 @@ func TestDiagnostics(t *testing.T) { case identifyMessage := <-data: require.Contains(t, identifyMessage, diagnosticID) case <-time.After(time.Second * 1): - t.Fatal("Did not receive ID message") + require.Fail(t,"Did not receive ID message") } t.Run("Send", func(t *testing.T) { @@ -80,7 +80,7 @@ func TestDiagnostics(t *testing.T) { case result := <-data: require.Contains(t, result, testValue) case <-time.After(time.Second * 1): - t.Fatal("Did not receive diagnostic") + require.Fail(t,"Did not receive diagnostic") } }) @@ -137,7 +137,7 @@ func TestDiagnostics(t *testing.T) { select { case <-data: - t.Fatal("Should not send diagnostics when the segment key is not set") + require.Fail(t,"Should not send diagnostics when the segment key is not set") case <-time.After(time.Second * 1): // Did not receive diagnostics } @@ -150,7 +150,7 @@ func TestDiagnostics(t *testing.T) { select { case <-data: - t.Fatal("Should not send diagnostics when they are disabled") + require.Fail(t,"Should not send diagnostics when they are disabled") case <-time.After(time.Second * 1): // Did not receive diagnostics } From 29c738dc9db11e2572bc14f0ede783522c2c3e04 Mon Sep 17 00:00:00 2001 From: Nikhil Ranjan Date: Mon, 16 Sep 2019 23:28:13 +0200 Subject: [PATCH 41/53] Converting to structured logging the file app/email_batching.go (#12127) * 
Converting to structured logging the file app/email_batching.go * reverting to fmt.Sprintf as per need --- app/email_batching.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/app/email_batching.go b/app/email_batching.go index 1631b0d21e..01f82f31e4 100644 --- a/app/email_batching.go +++ b/app/email_batching.go @@ -142,7 +142,7 @@ func (job *EmailBatchingJob) checkPendingNotifications(now time.Time, handler fu team, err := job.server.Store.Team().GetByName(notifications[0].teamName) if err != nil { - mlog.Error(fmt.Sprint("Unable to find Team id for notification", err)) + mlog.Error("Unable to find Team id for notification", mlog.Err(err)) continue } @@ -154,13 +154,13 @@ func (job *EmailBatchingJob) checkPendingNotifications(now time.Time, handler fu // all queued notifications channelMembers, err := job.server.Store.Channel().GetMembersForUser(inspectedTeamNames[notification.teamName], userId) if err != nil { - mlog.Error(fmt.Sprint("Unable to find ChannelMembers for user", err)) + mlog.Error("Unable to find ChannelMembers for user", mlog.Err(err)) continue } for _, channelMember := range *channelMembers { if channelMember.LastViewedAt >= batchStartTime { - mlog.Debug(fmt.Sprintf("Deleted notifications for user %s", userId), mlog.String("user_id", userId)) + mlog.Debug("Deleted notifications for user", mlog.String("user_id", userId)) delete(job.pendingNotifications, userId) break } @@ -241,7 +241,7 @@ func (s *Server) sendBatchedEmailNotification(userId string, notifications []*ba body.Props["BodyText"] = translateFunc("api.email_batching.send_batched_email_notification.body_text", len(notifications)) if err := s.FakeApp().SendNotificationMail(user.Email, subject, body.Render()); err != nil { - mlog.Warn(fmt.Sprintf("Unable to send batched email notification err=%v", err), mlog.String("email", user.Email)) + mlog.Warn("Unable to send batched email notification", mlog.String("email", user.Email), mlog.Err(err)) } } From 
04653ec924219b5296845db1d73749da4469bc09 Mon Sep 17 00:00:00 2001 From: Nikhil Ranjan Date: Tue, 17 Sep 2019 00:11:17 +0200 Subject: [PATCH 42/53] Convert app/helper_test.go t.Fatal calls into assert/require calls (#12223) * Convert app/helper_test.go t.Fatal calls into assert/require calls * using Equalf --- app/helper_test.go | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/app/helper_test.go b/app/helper_test.go index 947f07888c..be61728e2e 100644 --- a/app/helper_test.go +++ b/app/helper_test.go @@ -16,6 +16,7 @@ import ( "github.com/mattermost/mattermost-server/mlog" "github.com/mattermost/mattermost-server/model" "github.com/mattermost/mattermost-server/utils" + "github.com/stretchr/testify/require" ) type TestHelper struct { @@ -499,22 +500,14 @@ func (me *TestHelper) ResetEmojisMigration() { func (me *TestHelper) CheckTeamCount(t *testing.T, expected int64) { teamCount, err := me.App.Srv.Store.Team().AnalyticsTeamCount() - if err != nil { - t.Fatalf("Failed to get team count.") - } - if teamCount != expected { - t.Fatalf("Unexpected number of teams. Expected: %v, found: %v", expected, teamCount) - } + require.Nil(t, err, "Failed to get team count.") + require.Equalf(t, teamCount, expected, "Unexpected number of teams. Expected: %v, found: %v", expected, teamCount) } func (me *TestHelper) CheckChannelsCount(t *testing.T, expected int64) { - if count, err := me.App.Srv.Store.Channel().AnalyticsTypeCount("", model.CHANNEL_OPEN); err == nil { - if count != expected { - t.Fatalf("Unexpected number of channels. Expected: %v, found: %v", expected, count) - } - } else { - t.Fatalf("Failed to get channel count.") - } + count, err := me.App.Srv.Store.Channel().AnalyticsTypeCount("", model.CHANNEL_OPEN) + require.Nilf(t, err, "Failed to get channel count.") + require.Equalf(t, count, expected, "Unexpected number of channels. 
Expected: %v, found: %v", expected, count) } func (me *TestHelper) SetupTeamScheme() *model.Scheme { From 5b79fc4110853a752932252dddfc9bce241de67c Mon Sep 17 00:00:00 2001 From: Paulo Bittencourt Date: Tue, 17 Sep 2019 02:03:28 -0400 Subject: [PATCH 43/53] [MM-17889] Implement validation of plugin API version comments (#11941) --- Makefile | 1 + cmd/mattermost/commands/config_test.go | 8 +- go.mod | 1 + go.sum | 1 + plugin/api.go | 116 +- plugin/checker/main.go | 126 ++ plugin/checker/main_test.go | 49 + plugin/checker/test/invalid/invalid.go | 16 + plugin/checker/test/missing/missing.go | 8 + plugin/checker/test/valid/valid.go | 12 + vendor/golang.org/x/tools/AUTHORS | 3 + vendor/golang.org/x/tools/CONTRIBUTORS | 3 + vendor/golang.org/x/tools/LICENSE | 27 + vendor/golang.org/x/tools/PATENTS | 22 + .../x/tools/go/gcexportdata/gcexportdata.go | 109 ++ .../x/tools/go/gcexportdata/importer.go | 73 ++ .../x/tools/go/gcexportdata/main.go | 99 ++ .../x/tools/go/internal/gcimporter/bexport.go | 852 +++++++++++++ .../x/tools/go/internal/gcimporter/bimport.go | 1036 ++++++++++++++++ .../go/internal/gcimporter/exportdata.go | 93 ++ .../go/internal/gcimporter/gcimporter.go | 1078 +++++++++++++++++ .../x/tools/go/internal/gcimporter/iexport.go | 723 +++++++++++ .../x/tools/go/internal/gcimporter/iimport.go | 606 +++++++++ .../go/internal/gcimporter/newInterface10.go | 21 + .../go/internal/gcimporter/newInterface11.go | 13 + .../tools/go/internal/packagesdriver/sizes.go | 160 +++ vendor/golang.org/x/tools/go/packages/doc.go | 222 ++++ .../x/tools/go/packages/external.go | 79 ++ .../golang.org/x/tools/go/packages/golist.go | 860 +++++++++++++ .../x/tools/go/packages/golist_overlay.go | 262 ++++ .../x/tools/go/packages/packages.go | 1071 ++++++++++++++++ .../golang.org/x/tools/go/packages/visit.go | 55 + .../x/tools/internal/fastwalk/fastwalk.go | 196 +++ .../fastwalk/fastwalk_dirent_fileno.go | 13 + .../internal/fastwalk/fastwalk_dirent_ino.go | 14 + 
.../fastwalk/fastwalk_dirent_namlen_bsd.go | 13 + .../fastwalk/fastwalk_dirent_namlen_linux.go | 29 + .../internal/fastwalk/fastwalk_portable.go | 37 + .../tools/internal/fastwalk/fastwalk_unix.go | 127 ++ .../x/tools/internal/gopathwalk/walk.go | 250 ++++ .../x/tools/internal/semver/semver.go | 388 ++++++ vendor/modules.txt | 8 + 42 files changed, 8875 insertions(+), 5 deletions(-) create mode 100644 plugin/checker/main.go create mode 100644 plugin/checker/main_test.go create mode 100644 plugin/checker/test/invalid/invalid.go create mode 100644 plugin/checker/test/missing/missing.go create mode 100644 plugin/checker/test/valid/valid.go create mode 100644 vendor/golang.org/x/tools/AUTHORS create mode 100644 vendor/golang.org/x/tools/CONTRIBUTORS create mode 100644 vendor/golang.org/x/tools/LICENSE create mode 100644 vendor/golang.org/x/tools/PATENTS create mode 100644 vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go create mode 100644 vendor/golang.org/x/tools/go/gcexportdata/importer.go create mode 100644 vendor/golang.org/x/tools/go/gcexportdata/main.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go create mode 100644 vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go create mode 100644 vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go create mode 100644 vendor/golang.org/x/tools/go/packages/doc.go create mode 100644 vendor/golang.org/x/tools/go/packages/external.go create mode 100644 
vendor/golang.org/x/tools/go/packages/golist.go create mode 100644 vendor/golang.org/x/tools/go/packages/golist_overlay.go create mode 100644 vendor/golang.org/x/tools/go/packages/packages.go create mode 100644 vendor/golang.org/x/tools/go/packages/visit.go create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go create mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go create mode 100644 vendor/golang.org/x/tools/internal/gopathwalk/walk.go create mode 100644 vendor/golang.org/x/tools/internal/semver/semver.go diff --git a/Makefile b/Makefile index d4b0c05a33..d76f4ff3af 100644 --- a/Makefile +++ b/Makefile @@ -144,6 +144,7 @@ govet: ## Runs govet against all packages. $(GO) get golang.org/x/tools/go/analysis/passes/shadow/cmd/shadow $(GO) vet $(GOFLAGS) $(ALL_PACKAGES) || exit 1 $(GO) vet -vettool=$(GOPATH)/bin/shadow $(GOFLAGS) $(ALL_PACKAGES) || exit 1 + $(GO) run plugin/checker/main.go gofmt: ## Runs gofmt against all packages. 
@echo Running GOFMT diff --git a/cmd/mattermost/commands/config_test.go b/cmd/mattermost/commands/config_test.go index a27420bf21..d5cb45ccc5 100644 --- a/cmd/mattermost/commands/config_test.go +++ b/cmd/mattermost/commands/config_test.go @@ -136,16 +136,16 @@ func TestConfigSet(t *testing.T) { }) t.Run("Error when the wrong value is set", func(t *testing.T) { - assert.Error(t, th.RunCommand(t, "config", "set", "EmailSettings.ConnectionSecurity", "invalid")) + assert.Error(t, th.RunCommand(t, "config", "set", "EmailSettings.ConnectionSecurity", "invalid-key")) output := th.CheckCommand(t, "config", "get", "EmailSettings.ConnectionSecurity") - assert.NotContains(t, string(output), "invalid") + assert.NotContains(t, string(output), "invalid-key") }) t.Run("Error when the wrong locale is set", func(t *testing.T) { th.CheckCommand(t, "config", "set", "LocalizationSettings.DefaultServerLocale", "es") - assert.Error(t, th.RunCommand(t, "config", "set", "LocalizationSettings.DefaultServerLocale", "invalid")) + assert.Error(t, th.RunCommand(t, "config", "set", "LocalizationSettings.DefaultServerLocale", "invalid-key")) output := th.CheckCommand(t, "config", "get", "LocalizationSettings.DefaultServerLocale") - assert.NotContains(t, string(output), "invalid") + assert.NotContains(t, string(output), "invalid-key") assert.NotContains(t, string(output), "\"en\"") }) diff --git a/go.mod b/go.mod index 68fb18281c..ff2d36a861 100644 --- a/go.mod +++ b/go.mod @@ -88,6 +88,7 @@ require ( golang.org/x/net v0.0.0-20190628185345-da137c7871d7 golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7 // indirect golang.org/x/text v0.3.2 + golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b google.golang.org/appengine v1.6.1 // indirect google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610 // indirect google.golang.org/grpc v1.22.0 // indirect diff --git a/go.sum b/go.sum index 80832b8a78..b9f77b9f1e 100644 --- a/go.sum +++ b/go.sum @@ -509,6 +509,7 @@ golang.org/x/tools 
v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b h1:mSUCVIwDx4hfXJfWsOPfdzEHxzb2Xjl6BQ8YgPnazQA= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= diff --git a/plugin/api.go b/plugin/api.go index c799b47f57..0bfe69cc84 100644 --- a/plugin/api.go +++ b/plugin/api.go @@ -16,19 +16,29 @@ import ( type API interface { // LoadPluginConfiguration loads the plugin's configuration. dest should be a pointer to a // struct that the configuration JSON can be unmarshalled to. + // + // Minimum server version: 5.2 LoadPluginConfiguration(dest interface{}) error // RegisterCommand registers a custom slash command. When the command is triggered, your plugin // can fulfill it via the ExecuteCommand hook. + // + // Minimum server version: 5.2 RegisterCommand(command *model.Command) error // UnregisterCommand unregisters a command previously registered via RegisterCommand. + // + // Minimum server version: 5.2 UnregisterCommand(teamId, trigger string) error // GetSession returns the session object for the Session ID + // + // Minimum server version: 5.2 GetSession(sessionId string) (*model.Session, *model.AppError) // GetConfig fetches the currently persisted config + // + // Minimum server version: 5.2 GetConfig() *model.Config // GetUnsanitizedConfig fetches the currently persisted config without removing secrets. 
@@ -37,6 +47,8 @@ type API interface { GetUnsanitizedConfig() *model.Config // SaveConfig sets the given config and persists the changes + // + // Minimum server version: 5.2 SaveConfig(config *model.Config) *model.AppError // GetPluginConfig fetches the currently persisted config of plugin @@ -76,9 +88,13 @@ type API interface { GetDiagnosticId() string // CreateUser creates a user. + // + // Minimum server version: 5.2 CreateUser(user *model.User) (*model.User, *model.AppError) // DeleteUser deletes a user. + // + // Minimum server version: 5.2 DeleteUser(userId string) *model.AppError // GetUsers a list of users based on search options. @@ -87,12 +103,18 @@ type API interface { GetUsers(options *model.UserGetOptions) ([]*model.User, *model.AppError) // GetUser gets a user. + // + // Minimum server version: 5.2 GetUser(userId string) (*model.User, *model.AppError) // GetUserByEmail gets a user by their email address. + // + // Minimum server version: 5.2 GetUserByEmail(email string) (*model.User, *model.AppError) // GetUserByUsername gets a user by their username. + // + // Minimum server version: 5.2 GetUserByUsername(name string) (*model.User, *model.AppError) // GetUsersByUsernames gets users by their usernames. @@ -121,16 +143,24 @@ type API interface { RemoveTeamIcon(teamId string) *model.AppError // UpdateUser updates a user. + // + // Minimum server version: 5.2 UpdateUser(user *model.User) (*model.User, *model.AppError) // GetUserStatus will get a user's status. + // + // Minimum server version: 5.2 GetUserStatus(userId string) (*model.Status, *model.AppError) // GetUserStatusesByIds will return a list of user statuses based on the provided slice of user IDs. + // + // Minimum server version: 5.2 GetUserStatusesByIds(userIds []string) ([]*model.Status, *model.AppError) // UpdateUserStatus will set a user's status until the user, or another integration/plugin, sets it back to online. // The status parameter can be: "online", "away", "dnd", or "offline". 
+ // + // Minimum server version: 5.2 UpdateUserStatus(userId, status string) (*model.Status, *model.AppError) // UpdateUserActive deactivates or reactivates an user. @@ -153,18 +183,28 @@ type API interface { GetLDAPUserAttributes(userId string, attributes []string) (map[string]string, *model.AppError) // CreateTeam creates a team. + // + // Minimum server version: 5.2 CreateTeam(team *model.Team) (*model.Team, *model.AppError) // DeleteTeam deletes a team. + // + // Minimum server version: 5.2 DeleteTeam(teamId string) *model.AppError // GetTeam gets all teams. + // + // Minimum server version: 5.2 GetTeams() ([]*model.Team, *model.AppError) // GetTeam gets a team. + // + // Minimum server version: 5.2 GetTeam(teamId string) (*model.Team, *model.AppError) // GetTeamByName gets a team by its name. + // + // Minimum server version: 5.2 GetTeamByName(name string) (*model.Team, *model.AppError) // GetTeamsUnreadForUser gets the unread message and mention counts for each team to which the given user belongs. @@ -173,6 +213,8 @@ type API interface { GetTeamsUnreadForUser(userId string) ([]*model.TeamUnread, *model.AppError) // UpdateTeam updates a team. + // + // Minimum server version: 5.2 UpdateTeam(team *model.Team) (*model.Team, *model.AppError) // SearchTeams search a team. @@ -186,18 +228,28 @@ type API interface { GetTeamsForUser(userId string) ([]*model.Team, *model.AppError) // CreateTeamMember creates a team membership. + // + // Minimum server version: 5.2 CreateTeamMember(teamId, userId string) (*model.TeamMember, *model.AppError) // CreateTeamMember creates a team membership for all provided user ids. + // + // Minimum server version: 5.2 CreateTeamMembers(teamId string, userIds []string, requestorId string) ([]*model.TeamMember, *model.AppError) // DeleteTeamMember deletes a team membership. 
+ // + // Minimum server version: 5.2 DeleteTeamMember(teamId, userId, requestorId string) *model.AppError // GetTeamMembers returns the memberships of a specific team. + // + // Minimum server version: 5.2 GetTeamMembers(teamId string, page, perPage int) ([]*model.TeamMember, *model.AppError) // GetTeamMember returns a specific membership. + // + // Minimum server version: 5.2 GetTeamMember(teamId, userId string) (*model.TeamMember, *model.AppError) // GetTeamMembersForUser returns all team memberships for a user. @@ -206,24 +258,38 @@ type API interface { GetTeamMembersForUser(userId string, page int, perPage int) ([]*model.TeamMember, *model.AppError) // UpdateTeamMemberRoles updates the role for a team membership. + // + // Minimum server version: 5.2 UpdateTeamMemberRoles(teamId, userId, newRoles string) (*model.TeamMember, *model.AppError) // CreateChannel creates a channel. + // + // Minimum server version: 5.2 CreateChannel(channel *model.Channel) (*model.Channel, *model.AppError) // DeleteChannel deletes a channel. + // + // Minimum server version: 5.2 DeleteChannel(channelId string) *model.AppError // GetPublicChannelsForTeam gets a list of all channels. + // + // Minimum server version: 5.2 GetPublicChannelsForTeam(teamId string, page, perPage int) ([]*model.Channel, *model.AppError) // GetChannel gets a channel. + // + // Minimum server version: 5.2 GetChannel(channelId string) (*model.Channel, *model.AppError) // GetChannelByName gets a channel by its name, given a team id. + // + // Minimum server version: 5.2 GetChannelByName(teamId, name string, includeDeleted bool) (*model.Channel, *model.AppError) // GetChannelByNameForTeamName gets a channel by its name, given a team name. + // + // Minimum server version: 5.2 GetChannelByNameForTeamName(teamName, channelName string, includeDeleted bool) (*model.Channel, *model.AppError) // GetChannelsForTeamForUser gets a list of channels for given user ID in given team ID. 
@@ -238,13 +304,19 @@ type API interface { // GetDirectChannel gets a direct message channel. // If the channel does not exist it will create it. + // + // Minimum server version: 5.2 GetDirectChannel(userId1, userId2 string) (*model.Channel, *model.AppError) // GetGroupChannel gets a group message channel. // If the channel does not exist it will create it. + // + // Minimum server version: 5.2 GetGroupChannel(userIds []string) (*model.Channel, *model.AppError) // UpdateChannel updates a channel. + // + // Minimum server version: 5.2 UpdateChannel(channel *model.Channel) (*model.Channel, *model.AppError) // SearchChannels returns the channels on a team matching the provided search term. @@ -263,9 +335,13 @@ type API interface { SearchPostsInTeam(teamId string, paramsList []*model.SearchParams) ([]*model.Post, *model.AppError) // AddChannelMember creates a channel membership for a user. + // + // Minimum server version: 5.2 AddChannelMember(channelId, userId string) (*model.ChannelMember, *model.AppError) // GetChannelMember gets a channel membership for a user. + // + // Minimum server version: 5.2 GetChannelMember(channelId, userId string) (*model.ChannelMember, *model.AppError) // GetChannelMembers gets a channel membership for all users. @@ -284,15 +360,23 @@ type API interface { GetChannelMembersForUser(teamId, userId string, page, perPage int) ([]*model.ChannelMember, *model.AppError) // UpdateChannelMemberRoles updates a user's roles for a channel. + // + // Minimum server version: 5.2 UpdateChannelMemberRoles(channelId, userId, newRoles string) (*model.ChannelMember, *model.AppError) // UpdateChannelMemberNotifications updates a user's notification properties for a channel. + // + // Minimum server version: 5.2 UpdateChannelMemberNotifications(channelId, userId string, notifications map[string]string) (*model.ChannelMember, *model.AppError) // DeleteChannelMember deletes a channel membership for a user. 
+ // + // Minimum server version: 5.2 DeleteChannelMember(channelId, userId string) *model.AppError // CreatePost creates a post. + // + // Minimum server version: 5.2 CreatePost(post *model.Post) (*model.Post, *model.AppError) // AddReaction add a reaction to a post. @@ -311,17 +395,25 @@ type API interface { GetReactions(postId string) ([]*model.Reaction, *model.AppError) // SendEphemeralPost creates an ephemeral post. + // + // Minimum server version: 5.2 SendEphemeralPost(userId string, post *model.Post) *model.Post // UpdateEphemeralPost updates an ephemeral message previously sent to the user. // EXPERIMENTAL: This API is experimental and can be changed without advance notice. + // + // Minimum server version: 5.2 UpdateEphemeralPost(userId string, post *model.Post) *model.Post // DeleteEphemeralPost deletes an ephemeral message previously sent to the user. // EXPERIMENTAL: This API is experimental and can be changed without advance notice. + // + // Minimum server version: 5.2 DeleteEphemeralPost(userId, postId string) // DeletePost deletes a post. + // + // Minimum server version: 5.2 DeletePost(postId string) *model.AppError // GetPostThread gets a post with all the other posts in the same thread. @@ -330,6 +422,8 @@ type API interface { GetPostThread(postId string) (*model.PostList, *model.AppError) // GetPost gets a post. + // + // Minimum server version: 5.2 GetPost(postId string) (*model.Post, *model.AppError) // GetPostsSince gets posts created after a specified time as Unix time in milliseconds. @@ -358,6 +452,8 @@ type API interface { GetTeamStats(teamId string) (*model.TeamStats, *model.AppError) // UpdatePost updates a post. + // + // Minimum server version: 5.2 UpdatePost(post *model.Post) (*model.Post, *model.AppError) // GetProfileImage gets user's profile image. @@ -393,6 +489,8 @@ type API interface { // The duplicate FileInfo objects are not initially linked to a post, but may now be passed // to CreatePost. 
Use this API to duplicate a post and its file attachments without // actually duplicating the uploaded files. + // + // Minimum server version: 5.2 CopyFileInfos(userId string, fileIds []string) ([]string, *model.AppError) // GetFileInfo gets a File Info for a specific fileId @@ -402,7 +500,7 @@ type API interface { // GetFile gets content of a file by it's ID // - // Minimum Server version: 5.8 + // Minimum server version: 5.8 GetFile(fileId string) ([]byte, *model.AppError) // GetFileLink gets the public link to a file by fileId. @@ -463,6 +561,8 @@ type API interface { // KVSet stores a key-value pair, unique per plugin. // Provided helper functions and internal plugin code will use the prefix `mmi_` before keys. Do not use this prefix. + // + // Minimum server version: 5.2 KVSet(key string, value []byte) *model.AppError // KVCompareAndSet updates a key-value pair, unique per plugin, but only if the current value matches the given oldValue. @@ -488,9 +588,13 @@ type API interface { KVSetWithExpiry(key string, value []byte, expireInSeconds int64) *model.AppError // KVGet retrieves a value based on the key, unique per plugin. Returns nil for non-existent keys. + // + // Minimum server version: 5.2 KVGet(key string) ([]byte, *model.AppError) // KVDelete removes a key-value pair, unique per plugin. Returns nil for non-existent keys. + // + // Minimum server version: 5.2 KVDelete(key string) *model.AppError // KVDeleteAll removes all key-value pairs for a plugin. @@ -507,6 +611,8 @@ type API interface { // event is the type and will be prepended with "custom__". // payload is the data sent with the event. Interface values must be primitive Go types or mattermost-server/model types. // broadcast determines to which users to send the event. + // + // Minimum server version: 5.2 PublishWebSocketEvent(event string, payload map[string]interface{}, broadcast *model.WebsocketBroadcast) // HasPermissionTo check if the user has the permission at system scope. 
@@ -528,24 +634,32 @@ type API interface { // Appropriate context such as the plugin name will already be added as fields so plugins // do not need to add that info. // keyValuePairs should be primitive go types or other values that can be encoded by encoding/gob + // + // Minimum server version: 5.2 LogDebug(msg string, keyValuePairs ...interface{}) // LogInfo writes a log message to the Mattermost server log file. // Appropriate context such as the plugin name will already be added as fields so plugins // do not need to add that info. // keyValuePairs should be primitive go types or other values that can be encoded by encoding/gob + // + // Minimum server version: 5.2 LogInfo(msg string, keyValuePairs ...interface{}) // LogError writes a log message to the Mattermost server log file. // Appropriate context such as the plugin name will already be added as fields so plugins // do not need to add that info. // keyValuePairs should be primitive go types or other values that can be encoded by encoding/gob + // + // Minimum server version: 5.2 LogError(msg string, keyValuePairs ...interface{}) // LogWarn writes a log message to the Mattermost server log file. // Appropriate context such as the plugin name will already be added as fields so plugins // do not need to add that info. // keyValuePairs should be primitive go types or other values that can be encoded by encoding/gob + // + // Minimum server version: 5.2 LogWarn(msg string, keyValuePairs ...interface{}) // SendMail sends an email to a specific address diff --git a/plugin/checker/main.go b/plugin/checker/main.go new file mode 100644 index 0000000000..724bf18278 --- /dev/null +++ b/plugin/checker/main.go @@ -0,0 +1,126 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
+ +package main + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "regexp" + "strings" + + "go/ast" + + "golang.org/x/tools/go/packages" + + "github.com/pkg/errors" +) + +const pluginPackagePath = "github.com/mattermost/mattermost-server/plugin" + +func main() { + if err := runCheck(pluginPackagePath); err != nil { + fmt.Fprintln(os.Stderr, "#", pluginPackagePath) + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func runCheck(pkgPath string) error { + pkg, err := getPackage(pkgPath) + if err != nil { + return err + } + + apiInterface := findAPIInterface(pkg.Syntax) + if apiInterface == nil { + return errors.Errorf("could not find API interface in package %s", pkgPath) + } + + invalidMethods := findInvalidMethods(apiInterface.Methods.List) + if len(invalidMethods) > 0 { + return errors.New(renderErrorMessage(pkg, invalidMethods)) + } + return nil +} + +func getPackage(pkgPath string) (*packages.Package, error) { + cfg := &packages.Config{ + Mode: packages.NeedName | packages.NeedTypes | packages.NeedSyntax, + } + pkgs, err := packages.Load(cfg, pkgPath) + if err != nil { + return nil, err + } + + if len(pkgs) == 0 { + return nil, errors.Errorf("could not find package %s", pkgPath) + } + return pkgs[0], nil +} + +func findAPIInterface(files []*ast.File) *ast.InterfaceType { + for _, f := range files { + var iface *ast.InterfaceType + + ast.Inspect(f, func(n ast.Node) bool { + if t, ok := n.(*ast.TypeSpec); ok { + if i, ok := t.Type.(*ast.InterfaceType); ok && t.Name.Name == "API" { + iface = i + return false + } + } + return true + }) + + if iface != nil { + return iface + } + } + return nil +} + +func findInvalidMethods(methods []*ast.Field) []*ast.Field { + var invalid []*ast.Field + for _, m := range methods { + if !hasValidMinimumVersionComment(m.Doc.Text()) { + invalid = append(invalid, m) + } + } + return invalid +} + +var versionRequirementRE = regexp.MustCompile(`^Minimum server version: \d+\.\d+(\.\d+)?$`) + +func hasValidMinimumVersionComment(s 
string) bool { + lines := strings.Split(strings.TrimSpace(s), "\n") + if len(lines) > 0 { + lastLine := lines[len(lines)-1] + return versionRequirementRE.MatchString(lastLine) + } + return false +} + +func renderErrorMessage(pkg *packages.Package, methods []*ast.Field) string { + cwd, _ := os.Getwd() + out := &bytes.Buffer{} + + for _, m := range methods { + pos := pkg.Fset.Position(m.Pos()) + filename, err := filepath.Rel(cwd, pos.Filename) + if err != nil { + // If deriving a relative path fails for some reason, + // we prefer to still print the absolute path to the file. + filename = pos.Filename + } + fmt.Fprintf(out, + "%s:%d:%d: missing a minimum server version comment\n", + filename, + pos.Line, + pos.Column, + ) + } + return out.String() +} diff --git a/plugin/checker/main_test.go b/plugin/checker/main_test.go new file mode 100644 index 0000000000..82c01837fe --- /dev/null +++ b/plugin/checker/main_test.go @@ -0,0 +1,49 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
+ +package main + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRunCheck(t *testing.T) { + testCases := []struct { + name, pkgPath, err string + }{ + { + name: "valid comments", + pkgPath: "github.com/mattermost/mattermost-server/plugin/checker/test/valid", + err: "", + }, + { + name: "invalid comments", + pkgPath: "github.com/mattermost/mattermost-server/plugin/checker/test/invalid", + err: "test/invalid/invalid.go:15:2: missing a minimum server version comment\n", + }, + { + name: "missing API interface", + pkgPath: "github.com/mattermost/mattermost-server/plugin/checker/test/missing", + err: "could not find API interface in package github.com/mattermost/mattermost-server/plugin/checker/test/missing", + }, + { + name: "non-existent package path", + pkgPath: "github.com/mattermost/mattermost-server/plugin/checker/test/does_not_exist", + err: "could not find API interface in package github.com/mattermost/mattermost-server/plugin/checker/test/does_not_exist", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := runCheck(tc.pkgPath) + + if tc.err != "" { + assert.EqualError(t, err, tc.err) + } else { + assert.NoError(t, err) + } + }) + } +} diff --git a/plugin/checker/test/invalid/invalid.go b/plugin/checker/test/invalid/invalid.go new file mode 100644 index 0000000000..dc7ec0cbbc --- /dev/null +++ b/plugin/checker/test/invalid/invalid.go @@ -0,0 +1,16 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package invalid + +type API interface { + // ValidMethod is a fake method for testing the + // plugin comment checker with a valid comment. + // + // Minimum server version: 1.2.3 + ValidMethod() + + // InvalidMethod is a fake method for testing the + // plugin comment checker with an invalid comment. 
+ InvalidMethod() +} diff --git a/plugin/checker/test/missing/missing.go b/plugin/checker/test/missing/missing.go new file mode 100644 index 0000000000..14c0a7af87 --- /dev/null +++ b/plugin/checker/test/missing/missing.go @@ -0,0 +1,8 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package missing + +// SomeType is a fake interface for testing the plugin comment checker. +type SomeType interface { +} diff --git a/plugin/checker/test/valid/valid.go b/plugin/checker/test/valid/valid.go new file mode 100644 index 0000000000..d1fceafcac --- /dev/null +++ b/plugin/checker/test/valid/valid.go @@ -0,0 +1,12 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package valid + +type API interface { + // ValidMethod is a fake method for testing the + // plugin comment checker with a valid comment. + // + // Minimum server version: 1.2.3 + ValidMethod() +} diff --git a/vendor/golang.org/x/tools/AUTHORS b/vendor/golang.org/x/tools/AUTHORS new file mode 100644 index 0000000000..15167cd746 --- /dev/null +++ b/vendor/golang.org/x/tools/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/tools/CONTRIBUTORS b/vendor/golang.org/x/tools/CONTRIBUTORS new file mode 100644 index 0000000000..1c4577e968 --- /dev/null +++ b/vendor/golang.org/x/tools/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. 
diff --git a/vendor/golang.org/x/tools/LICENSE b/vendor/golang.org/x/tools/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/golang.org/x/tools/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/golang.org/x/tools/PATENTS b/vendor/golang.org/x/tools/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/vendor/golang.org/x/tools/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go new file mode 100644 index 0000000000..98b3987b97 --- /dev/null +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -0,0 +1,109 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package gcexportdata provides functions for locating, reading, and +// writing export data files containing type information produced by the +// gc compiler. This package supports go1.7 export data format and all +// later versions. +// +// Although it might seem convenient for this package to live alongside +// go/types in the standard library, this would cause version skew +// problems for developer tools that use it, since they must be able to +// consume the outputs of the gc compiler both before and after a Go +// update such as from Go 1.7 to Go 1.8. Because this package lives in +// golang.org/x/tools, sites can update their version of this repo some +// time before the Go 1.8 release and rebuild and redeploy their +// developer tools, which will then be able to consume both Go 1.7 and +// Go 1.8 export data files, so they will work before and after the +// Go update. (See discussion at https://golang.org/issue/15651.) +// +package gcexportdata // import "golang.org/x/tools/go/gcexportdata" + +import ( + "bufio" + "bytes" + "fmt" + "go/token" + "go/types" + "io" + "io/ioutil" + + "golang.org/x/tools/go/internal/gcimporter" +) + +// Find returns the name of an object (.o) or archive (.a) file +// containing type information for the specified import path, +// using the workspace layout conventions of go/build. +// If no file was found, an empty filename is returned. +// +// A relative srcDir is interpreted relative to the current working directory. +// +// Find also returns the package's resolved (canonical) import path, +// reflecting the effects of srcDir and vendoring on importPath. +func Find(importPath, srcDir string) (filename, path string) { + return gcimporter.FindPkg(importPath, srcDir) +} + +// NewReader returns a reader for the export data section of an object +// (.o) or archive (.a) file read from r. The new reader may provide +// additional trailing data beyond the end of the export data. 
+func NewReader(r io.Reader) (io.Reader, error) { + buf := bufio.NewReader(r) + _, err := gcimporter.FindExportData(buf) + // If we ever switch to a zip-like archive format with the ToC + // at the end, we can return the correct portion of export data, + // but for now we must return the entire rest of the file. + return buf, err +} + +// Read reads export data from in, decodes it, and returns type +// information for the package. +// The package name is specified by path. +// File position information is added to fset. +// +// Read may inspect and add to the imports map to ensure that references +// within the export data to other packages are consistent. The caller +// must ensure that imports[path] does not exist, or exists but is +// incomplete (see types.Package.Complete), and Read inserts the +// resulting package into this map entry. +// +// On return, the state of the reader is undefined. +func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) { + data, err := ioutil.ReadAll(in) + if err != nil { + return nil, fmt.Errorf("reading export data for %q: %v", path, err) + } + + if bytes.HasPrefix(data, []byte("!")) { + return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path) + } + + // The App Engine Go runtime v1.6 uses the old export data format. + // TODO(adonovan): delete once v1.7 has been around for a while. + if bytes.HasPrefix(data, []byte("package ")) { + return gcimporter.ImportData(imports, path, path, bytes.NewReader(data)) + } + + // The indexed export format starts with an 'i'; the older + // binary export format starts with a 'c', 'd', or 'v' + // (from "version"). Select appropriate importer. 
+ if len(data) > 0 && data[0] == 'i' { + _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) + return pkg, err + } + + _, pkg, err := gcimporter.BImportData(fset, imports, data, path) + return pkg, err +} + +// Write writes encoded type information for the specified package to out. +// The FileSet provides file position information for named objects. +func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error { + b, err := gcimporter.BExportData(fset, pkg) + if err != nil { + return err + } + _, err = out.Write(b) + return err +} diff --git a/vendor/golang.org/x/tools/go/gcexportdata/importer.go b/vendor/golang.org/x/tools/go/gcexportdata/importer.go new file mode 100644 index 0000000000..efe221e7e1 --- /dev/null +++ b/vendor/golang.org/x/tools/go/gcexportdata/importer.go @@ -0,0 +1,73 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcexportdata + +import ( + "fmt" + "go/token" + "go/types" + "os" +) + +// NewImporter returns a new instance of the types.Importer interface +// that reads type information from export data files written by gc. +// The Importer also satisfies types.ImporterFrom. +// +// Export data files are located using "go build" workspace conventions +// and the build.Default context. +// +// Use this importer instead of go/importer.For("gc", ...) to avoid the +// version-skew problems described in the documentation of this package, +// or to control the FileSet or access the imports map populated during +// package loading. 
+// +func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom { + return importer{fset, imports} +} + +type importer struct { + fset *token.FileSet + imports map[string]*types.Package +} + +func (imp importer) Import(importPath string) (*types.Package, error) { + return imp.ImportFrom(importPath, "", 0) +} + +func (imp importer) ImportFrom(importPath, srcDir string, mode types.ImportMode) (_ *types.Package, err error) { + filename, path := Find(importPath, srcDir) + if filename == "" { + if importPath == "unsafe" { + // Even for unsafe, call Find first in case + // the package was vendored. + return types.Unsafe, nil + } + return nil, fmt.Errorf("can't find import: %s", importPath) + } + + if pkg, ok := imp.imports[path]; ok && pkg.Complete() { + return pkg, nil // cache hit + } + + // open file + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + f.Close() + if err != nil { + // add file name to error + err = fmt.Errorf("reading export data: %s: %v", filename, err) + } + }() + + r, err := NewReader(f) + if err != nil { + return nil, err + } + + return Read(r, imp.fset, imp.imports, path) +} diff --git a/vendor/golang.org/x/tools/go/gcexportdata/main.go b/vendor/golang.org/x/tools/go/gcexportdata/main.go new file mode 100644 index 0000000000..2713dce64a --- /dev/null +++ b/vendor/golang.org/x/tools/go/gcexportdata/main.go @@ -0,0 +1,99 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// The gcexportdata command is a diagnostic tool that displays the +// contents of gc export data files. 
+package main + +import ( + "flag" + "fmt" + "go/token" + "go/types" + "log" + "os" + + "golang.org/x/tools/go/gcexportdata" + "golang.org/x/tools/go/types/typeutil" +) + +var packageFlag = flag.String("package", "", "alternative package to print") + +func main() { + log.SetPrefix("gcexportdata: ") + log.SetFlags(0) + flag.Usage = func() { + fmt.Fprintln(os.Stderr, "usage: gcexportdata [-package path] file.a") + } + flag.Parse() + if flag.NArg() != 1 { + flag.Usage() + os.Exit(2) + } + filename := flag.Args()[0] + + f, err := os.Open(filename) + if err != nil { + log.Fatal(err) + } + + r, err := gcexportdata.NewReader(f) + if err != nil { + log.Fatalf("%s: %s", filename, err) + } + + // Decode the package. + const primary = "" + imports := make(map[string]*types.Package) + fset := token.NewFileSet() + pkg, err := gcexportdata.Read(r, fset, imports, primary) + if err != nil { + log.Fatalf("%s: %s", filename, err) + } + + // Optionally select an indirectly mentioned package. + if *packageFlag != "" { + pkg = imports[*packageFlag] + if pkg == nil { + fmt.Fprintf(os.Stderr, "export data file %s does not mention %s; has:\n", + filename, *packageFlag) + for p := range imports { + if p != primary { + fmt.Fprintf(os.Stderr, "\t%s\n", p) + } + } + os.Exit(1) + } + } + + // Print all package-level declarations, including non-exported ones. + fmt.Printf("package %s\n", pkg.Name()) + for _, imp := range pkg.Imports() { + fmt.Printf("import %q\n", imp.Path()) + } + qual := func(p *types.Package) string { + if pkg == p { + return "" + } + return p.Name() + } + scope := pkg.Scope() + for _, name := range scope.Names() { + obj := scope.Lookup(name) + fmt.Printf("%s: %s\n", + fset.Position(obj.Pos()), + types.ObjectString(obj, qual)) + + // For types, print each method. 
+ if _, ok := obj.(*types.TypeName); ok { + for _, method := range typeutil.IntuitiveMethodSet(obj.Type(), nil) { + fmt.Printf("%s: %s\n", + fset.Position(method.Obj().Pos()), + types.SelectionString(method, qual)) + } + } + } +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go new file mode 100644 index 0000000000..a807d0aaa2 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go @@ -0,0 +1,852 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Binary package export. +// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go; +// see that file for specification of the format. + +package gcimporter + +import ( + "bytes" + "encoding/binary" + "fmt" + "go/ast" + "go/constant" + "go/token" + "go/types" + "math" + "math/big" + "sort" + "strings" +) + +// If debugFormat is set, each integer and string value is preceded by a marker +// and position information in the encoding. This mechanism permits an importer +// to recognize immediately when it is out of sync. The importer recognizes this +// mode automatically (i.e., it can import export data produced with debugging +// support even if debugFormat is not set at the time of import). This mode will +// lead to massively larger export data (by a factor of 2 to 3) and should only +// be enabled during development and debugging. +// +// NOTE: This flag is the first flag to enable if importing dies because of +// (suspected) format errors, and whenever a change is made to the format. +const debugFormat = false // default: false + +// If trace is set, debugging output is printed to std out. +const trace = false // default: false + +// Current export format version. Increase with each format change. +// Note: The latest binary (non-indexed) export format is at version 6. 
+// This exporter is still at level 4, but it doesn't matter since +// the binary importer can handle older versions just fine. +// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE +// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMEMTED HERE +// 4: type name objects support type aliases, uses aliasTag +// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used) +// 2: removed unused bool in ODCL export (compiler only) +// 1: header format change (more regular), export package for _ struct fields +// 0: Go1.7 encoding +const exportVersion = 4 + +// trackAllTypes enables cycle tracking for all types, not just named +// types. The existing compiler invariants assume that unnamed types +// that are not completely set up are not used, or else there are spurious +// errors. +// If disabled, only named types are tracked, possibly leading to slightly +// less efficient encoding in rare cases. It also prevents the export of +// some corner-case type declarations (but those are not handled correctly +// with with the textual export format either). +// TODO(gri) enable and remove once issues caused by it are fixed +const trackAllTypes = false + +type exporter struct { + fset *token.FileSet + out bytes.Buffer + + // object -> index maps, indexed in order of serialization + strIndex map[string]int + pkgIndex map[*types.Package]int + typIndex map[types.Type]int + + // position encoding + posInfoFormat bool + prevFile string + prevLine int + + // debugging support + written int // bytes written + indent int // for trace +} + +// internalError represents an error generated inside this package. +type internalError string + +func (e internalError) Error() string { return "gcimporter: " + string(e) } + +func internalErrorf(format string, args ...interface{}) error { + return internalError(fmt.Sprintf(format, args...)) +} + +// BExportData returns binary export data for pkg. 
+// If no file set is provided, position info will be missing. +func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) { + defer func() { + if e := recover(); e != nil { + if ierr, ok := e.(internalError); ok { + err = ierr + return + } + // Not an internal error; panic again. + panic(e) + } + }() + + p := exporter{ + fset: fset, + strIndex: map[string]int{"": 0}, // empty string is mapped to 0 + pkgIndex: make(map[*types.Package]int), + typIndex: make(map[types.Type]int), + posInfoFormat: true, // TODO(gri) might become a flag, eventually + } + + // write version info + // The version string must start with "version %d" where %d is the version + // number. Additional debugging information may follow after a blank; that + // text is ignored by the importer. + p.rawStringln(fmt.Sprintf("version %d", exportVersion)) + var debug string + if debugFormat { + debug = "debug" + } + p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly + p.bool(trackAllTypes) + p.bool(p.posInfoFormat) + + // --- generic export data --- + + // populate type map with predeclared "known" types + for index, typ := range predeclared() { + p.typIndex[typ] = index + } + if len(p.typIndex) != len(predeclared()) { + return nil, internalError("duplicate entries in type map?") + } + + // write package data + p.pkg(pkg, true) + if trace { + p.tracef("\n") + } + + // write objects + objcount := 0 + scope := pkg.Scope() + for _, name := range scope.Names() { + if !ast.IsExported(name) { + continue + } + if trace { + p.tracef("\n") + } + p.obj(scope.Lookup(name)) + objcount++ + } + + // indicate end of list + if trace { + p.tracef("\n") + } + p.tag(endTag) + + // for self-verification only (redundant) + p.int(objcount) + + if trace { + p.tracef("\n") + } + + // --- end of export data --- + + return p.out.Bytes(), nil +} + +func (p *exporter) pkg(pkg *types.Package, emptypath bool) { + if pkg == nil { + 
panic(internalError("unexpected nil pkg")) + } + + // if we saw the package before, write its index (>= 0) + if i, ok := p.pkgIndex[pkg]; ok { + p.index('P', i) + return + } + + // otherwise, remember the package, write the package tag (< 0) and package data + if trace { + p.tracef("P%d = { ", len(p.pkgIndex)) + defer p.tracef("} ") + } + p.pkgIndex[pkg] = len(p.pkgIndex) + + p.tag(packageTag) + p.string(pkg.Name()) + if emptypath { + p.string("") + } else { + p.string(pkg.Path()) + } +} + +func (p *exporter) obj(obj types.Object) { + switch obj := obj.(type) { + case *types.Const: + p.tag(constTag) + p.pos(obj) + p.qualifiedName(obj) + p.typ(obj.Type()) + p.value(obj.Val()) + + case *types.TypeName: + if obj.IsAlias() { + p.tag(aliasTag) + p.pos(obj) + p.qualifiedName(obj) + } else { + p.tag(typeTag) + } + p.typ(obj.Type()) + + case *types.Var: + p.tag(varTag) + p.pos(obj) + p.qualifiedName(obj) + p.typ(obj.Type()) + + case *types.Func: + p.tag(funcTag) + p.pos(obj) + p.qualifiedName(obj) + sig := obj.Type().(*types.Signature) + p.paramList(sig.Params(), sig.Variadic()) + p.paramList(sig.Results(), false) + + default: + panic(internalErrorf("unexpected object %v (%T)", obj, obj)) + } +} + +func (p *exporter) pos(obj types.Object) { + if !p.posInfoFormat { + return + } + + file, line := p.fileLine(obj) + if file == p.prevFile { + // common case: write line delta + // delta == 0 means different file or no line change + delta := line - p.prevLine + p.int(delta) + if delta == 0 { + p.int(-1) // -1 means no file change + } + } else { + // different file + p.int(0) + // Encode filename as length of common prefix with previous + // filename, followed by (possibly empty) suffix. Filenames + // frequently share path prefixes, so this can save a lot + // of space and make export data size less dependent on file + // path length. The suffix is unlikely to be empty because + // file names tend to end in ".go". 
+ n := commonPrefixLen(p.prevFile, file) + p.int(n) // n >= 0 + p.string(file[n:]) // write suffix only + p.prevFile = file + p.int(line) + } + p.prevLine = line +} + +func (p *exporter) fileLine(obj types.Object) (file string, line int) { + if p.fset != nil { + pos := p.fset.Position(obj.Pos()) + file = pos.Filename + line = pos.Line + } + return +} + +func commonPrefixLen(a, b string) int { + if len(a) > len(b) { + a, b = b, a + } + // len(a) <= len(b) + i := 0 + for i < len(a) && a[i] == b[i] { + i++ + } + return i +} + +func (p *exporter) qualifiedName(obj types.Object) { + p.string(obj.Name()) + p.pkg(obj.Pkg(), false) +} + +func (p *exporter) typ(t types.Type) { + if t == nil { + panic(internalError("nil type")) + } + + // Possible optimization: Anonymous pointer types *T where + // T is a named type are common. We could canonicalize all + // such types *T to a single type PT = *T. This would lead + // to at most one *T entry in typIndex, and all future *T's + // would be encoded as the respective index directly. Would + // save 1 byte (pointerTag) per *T and reduce the typIndex + // size (at the cost of a canonicalization map). We can do + // this later, without encoding format change. 
+ + // if we saw the type before, write its index (>= 0) + if i, ok := p.typIndex[t]; ok { + p.index('T', i) + return + } + + // otherwise, remember the type, write the type tag (< 0) and type data + if trackAllTypes { + if trace { + p.tracef("T%d = {>\n", len(p.typIndex)) + defer p.tracef("<\n} ") + } + p.typIndex[t] = len(p.typIndex) + } + + switch t := t.(type) { + case *types.Named: + if !trackAllTypes { + // if we don't track all types, track named types now + p.typIndex[t] = len(p.typIndex) + } + + p.tag(namedTag) + p.pos(t.Obj()) + p.qualifiedName(t.Obj()) + p.typ(t.Underlying()) + if !types.IsInterface(t) { + p.assocMethods(t) + } + + case *types.Array: + p.tag(arrayTag) + p.int64(t.Len()) + p.typ(t.Elem()) + + case *types.Slice: + p.tag(sliceTag) + p.typ(t.Elem()) + + case *dddSlice: + p.tag(dddTag) + p.typ(t.elem) + + case *types.Struct: + p.tag(structTag) + p.fieldList(t) + + case *types.Pointer: + p.tag(pointerTag) + p.typ(t.Elem()) + + case *types.Signature: + p.tag(signatureTag) + p.paramList(t.Params(), t.Variadic()) + p.paramList(t.Results(), false) + + case *types.Interface: + p.tag(interfaceTag) + p.iface(t) + + case *types.Map: + p.tag(mapTag) + p.typ(t.Key()) + p.typ(t.Elem()) + + case *types.Chan: + p.tag(chanTag) + p.int(int(3 - t.Dir())) // hack + p.typ(t.Elem()) + + default: + panic(internalErrorf("unexpected type %T: %s", t, t)) + } +} + +func (p *exporter) assocMethods(named *types.Named) { + // Sort methods (for determinism). 
+ var methods []*types.Func + for i := 0; i < named.NumMethods(); i++ { + methods = append(methods, named.Method(i)) + } + sort.Sort(methodsByName(methods)) + + p.int(len(methods)) + + if trace && methods != nil { + p.tracef("associated methods {>\n") + } + + for i, m := range methods { + if trace && i > 0 { + p.tracef("\n") + } + + p.pos(m) + name := m.Name() + p.string(name) + if !exported(name) { + p.pkg(m.Pkg(), false) + } + + sig := m.Type().(*types.Signature) + p.paramList(types.NewTuple(sig.Recv()), false) + p.paramList(sig.Params(), sig.Variadic()) + p.paramList(sig.Results(), false) + p.int(0) // dummy value for go:nointerface pragma - ignored by importer + } + + if trace && methods != nil { + p.tracef("<\n} ") + } +} + +type methodsByName []*types.Func + +func (x methodsByName) Len() int { return len(x) } +func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() } + +func (p *exporter) fieldList(t *types.Struct) { + if trace && t.NumFields() > 0 { + p.tracef("fields {>\n") + defer p.tracef("<\n} ") + } + + p.int(t.NumFields()) + for i := 0; i < t.NumFields(); i++ { + if trace && i > 0 { + p.tracef("\n") + } + p.field(t.Field(i)) + p.string(t.Tag(i)) + } +} + +func (p *exporter) field(f *types.Var) { + if !f.IsField() { + panic(internalError("field expected")) + } + + p.pos(f) + p.fieldName(f) + p.typ(f.Type()) +} + +func (p *exporter) iface(t *types.Interface) { + // TODO(gri): enable importer to load embedded interfaces, + // then emit Embeddeds and ExplicitMethods separately here. 
+ p.int(0) + + n := t.NumMethods() + if trace && n > 0 { + p.tracef("methods {>\n") + defer p.tracef("<\n} ") + } + p.int(n) + for i := 0; i < n; i++ { + if trace && i > 0 { + p.tracef("\n") + } + p.method(t.Method(i)) + } +} + +func (p *exporter) method(m *types.Func) { + sig := m.Type().(*types.Signature) + if sig.Recv() == nil { + panic(internalError("method expected")) + } + + p.pos(m) + p.string(m.Name()) + if m.Name() != "_" && !ast.IsExported(m.Name()) { + p.pkg(m.Pkg(), false) + } + + // interface method; no need to encode receiver. + p.paramList(sig.Params(), sig.Variadic()) + p.paramList(sig.Results(), false) +} + +func (p *exporter) fieldName(f *types.Var) { + name := f.Name() + + if f.Anonymous() { + // anonymous field - we distinguish between 3 cases: + // 1) field name matches base type name and is exported + // 2) field name matches base type name and is not exported + // 3) field name doesn't match base type name (alias name) + bname := basetypeName(f.Type()) + if name == bname { + if ast.IsExported(name) { + name = "" // 1) we don't need to know the field name or package + } else { + name = "?" // 2) use unexported name "?" 
to force package export + } + } else { + // 3) indicate alias and export name as is + // (this requires an extra "@" but this is a rare case) + p.string("@") + } + } + + p.string(name) + if name != "" && !ast.IsExported(name) { + p.pkg(f.Pkg(), false) + } +} + +func basetypeName(typ types.Type) string { + switch typ := deref(typ).(type) { + case *types.Basic: + return typ.Name() + case *types.Named: + return typ.Obj().Name() + default: + return "" // unnamed type + } +} + +func (p *exporter) paramList(params *types.Tuple, variadic bool) { + // use negative length to indicate unnamed parameters + // (look at the first parameter only since either all + // names are present or all are absent) + n := params.Len() + if n > 0 && params.At(0).Name() == "" { + n = -n + } + p.int(n) + for i := 0; i < params.Len(); i++ { + q := params.At(i) + t := q.Type() + if variadic && i == params.Len()-1 { + t = &dddSlice{t.(*types.Slice).Elem()} + } + p.typ(t) + if n > 0 { + name := q.Name() + p.string(name) + if name != "_" { + p.pkg(q.Pkg(), false) + } + } + p.string("") // no compiler-specific info + } +} + +func (p *exporter) value(x constant.Value) { + if trace { + p.tracef("= ") + } + + switch x.Kind() { + case constant.Bool: + tag := falseTag + if constant.BoolVal(x) { + tag = trueTag + } + p.tag(tag) + + case constant.Int: + if v, exact := constant.Int64Val(x); exact { + // common case: x fits into an int64 - use compact encoding + p.tag(int64Tag) + p.int64(v) + return + } + // uncommon case: large x - use float encoding + // (powers of 2 will be encoded efficiently with exponent) + p.tag(floatTag) + p.float(constant.ToFloat(x)) + + case constant.Float: + p.tag(floatTag) + p.float(x) + + case constant.Complex: + p.tag(complexTag) + p.float(constant.Real(x)) + p.float(constant.Imag(x)) + + case constant.String: + p.tag(stringTag) + p.string(constant.StringVal(x)) + + case constant.Unknown: + // package contains type errors + p.tag(unknownTag) + + default: + 
panic(internalErrorf("unexpected value %v (%T)", x, x)) + } +} + +func (p *exporter) float(x constant.Value) { + if x.Kind() != constant.Float { + panic(internalErrorf("unexpected constant %v, want float", x)) + } + // extract sign (there is no -0) + sign := constant.Sign(x) + if sign == 0 { + // x == 0 + p.int(0) + return + } + // x != 0 + + var f big.Float + if v, exact := constant.Float64Val(x); exact { + // float64 + f.SetFloat64(v) + } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { + // TODO(gri): add big.Rat accessor to constant.Value. + r := valueToRat(num) + f.SetRat(r.Quo(r, valueToRat(denom))) + } else { + // Value too large to represent as a fraction => inaccessible. + // TODO(gri): add big.Float accessor to constant.Value. + f.SetFloat64(math.MaxFloat64) // FIXME + } + + // extract exponent such that 0.5 <= m < 1.0 + var m big.Float + exp := f.MantExp(&m) + + // extract mantissa as *big.Int + // - set exponent large enough so mant satisfies mant.IsInt() + // - get *big.Int from mant + m.SetMantExp(&m, int(m.MinPrec())) + mant, acc := m.Int(nil) + if acc != big.Exact { + panic(internalError("internal error")) + } + + p.int(sign) + p.int(exp) + p.string(string(mant.Bytes())) +} + +func valueToRat(x constant.Value) *big.Rat { + // Convert little-endian to big-endian. + // I can't believe this is necessary. 
+ bytes := constant.Bytes(x) + for i := 0; i < len(bytes)/2; i++ { + bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] + } + return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) +} + +func (p *exporter) bool(b bool) bool { + if trace { + p.tracef("[") + defer p.tracef("= %v] ", b) + } + + x := 0 + if b { + x = 1 + } + p.int(x) + return b +} + +// ---------------------------------------------------------------------------- +// Low-level encoders + +func (p *exporter) index(marker byte, index int) { + if index < 0 { + panic(internalError("invalid index < 0")) + } + if debugFormat { + p.marker('t') + } + if trace { + p.tracef("%c%d ", marker, index) + } + p.rawInt64(int64(index)) +} + +func (p *exporter) tag(tag int) { + if tag >= 0 { + panic(internalError("invalid tag >= 0")) + } + if debugFormat { + p.marker('t') + } + if trace { + p.tracef("%s ", tagString[-tag]) + } + p.rawInt64(int64(tag)) +} + +func (p *exporter) int(x int) { + p.int64(int64(x)) +} + +func (p *exporter) int64(x int64) { + if debugFormat { + p.marker('i') + } + if trace { + p.tracef("%d ", x) + } + p.rawInt64(x) +} + +func (p *exporter) string(s string) { + if debugFormat { + p.marker('s') + } + if trace { + p.tracef("%q ", s) + } + // if we saw the string before, write its index (>= 0) + // (the empty string is mapped to 0) + if i, ok := p.strIndex[s]; ok { + p.rawInt64(int64(i)) + return + } + // otherwise, remember string and write its negative length and bytes + p.strIndex[s] = len(p.strIndex) + p.rawInt64(-int64(len(s))) + for i := 0; i < len(s); i++ { + p.rawByte(s[i]) + } +} + +// marker emits a marker byte and position information which makes +// it easy for a reader to detect if it is "out of sync". Used for +// debugFormat format only. +func (p *exporter) marker(m byte) { + p.rawByte(m) + // Enable this for help tracking down the location + // of an incorrect marker when running in debugFormat. 
+ if false && trace { + p.tracef("#%d ", p.written) + } + p.rawInt64(int64(p.written)) +} + +// rawInt64 should only be used by low-level encoders. +func (p *exporter) rawInt64(x int64) { + var tmp [binary.MaxVarintLen64]byte + n := binary.PutVarint(tmp[:], x) + for i := 0; i < n; i++ { + p.rawByte(tmp[i]) + } +} + +// rawStringln should only be used to emit the initial version string. +func (p *exporter) rawStringln(s string) { + for i := 0; i < len(s); i++ { + p.rawByte(s[i]) + } + p.rawByte('\n') +} + +// rawByte is the bottleneck interface to write to p.out. +// rawByte escapes b as follows (any encoding does that +// hides '$'): +// +// '$' => '|' 'S' +// '|' => '|' '|' +// +// Necessary so other tools can find the end of the +// export data by searching for "$$". +// rawByte should only be used by low-level encoders. +func (p *exporter) rawByte(b byte) { + switch b { + case '$': + // write '$' as '|' 'S' + b = 'S' + fallthrough + case '|': + // write '|' as '|' '|' + p.out.WriteByte('|') + p.written++ + } + p.out.WriteByte(b) + p.written++ +} + +// tracef is like fmt.Printf but it rewrites the format string +// to take care of indentation. +func (p *exporter) tracef(format string, args ...interface{}) { + if strings.ContainsAny(format, "<>\n") { + var buf bytes.Buffer + for i := 0; i < len(format); i++ { + // no need to deal with runes + ch := format[i] + switch ch { + case '>': + p.indent++ + continue + case '<': + p.indent-- + continue + } + buf.WriteByte(ch) + if ch == '\n' { + for j := p.indent; j > 0; j-- { + buf.WriteString(". ") + } + } + } + format = buf.String() + } + fmt.Printf(format, args...) +} + +// Debugging support. 
+// (tagString is only used when tracing is enabled) +var tagString = [...]string{ + // Packages + -packageTag: "package", + + // Types + -namedTag: "named type", + -arrayTag: "array", + -sliceTag: "slice", + -dddTag: "ddd", + -structTag: "struct", + -pointerTag: "pointer", + -signatureTag: "signature", + -interfaceTag: "interface", + -mapTag: "map", + -chanTag: "chan", + + // Values + -falseTag: "false", + -trueTag: "true", + -int64Tag: "int64", + -floatTag: "float", + -fractionTag: "fraction", + -complexTag: "complex", + -stringTag: "string", + -unknownTag: "unknown", + + // Type aliases + -aliasTag: "alias", +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go new file mode 100644 index 0000000000..e3c3107825 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go @@ -0,0 +1,1036 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/bimport.go. 
+ +package gcimporter + +import ( + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "go/types" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +type importer struct { + imports map[string]*types.Package + data []byte + importpath string + buf []byte // for reading strings + version int // export format version + + // object lists + strList []string // in order of appearance + pathList []string // in order of appearance + pkgList []*types.Package // in order of appearance + typList []types.Type // in order of appearance + interfaceList []*types.Interface // for delayed completion only + trackAllTypes bool + + // position encoding + posInfoFormat bool + prevFile string + prevLine int + fake fakeFileSet + + // debugging support + debugFormat bool + read int // bytes read +} + +// BImportData imports a package from the serialized package data +// and returns the number of bytes consumed and a reference to the package. +// If the export data version is not recognized or the format is otherwise +// compromised, an error is returned. +func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + // catch panics and return them as errors + const currentVersion = 6 + version := -1 // unknown version + defer func() { + if e := recover(); e != nil { + // Return a (possibly nil or incomplete) package unchanged (see #16088). 
+ if version > currentVersion { + err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) + } else { + err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) + } + } + }() + + p := importer{ + imports: imports, + data: data, + importpath: path, + version: version, + strList: []string{""}, // empty string is mapped to 0 + pathList: []string{""}, // empty string is mapped to 0 + fake: fakeFileSet{ + fset: fset, + files: make(map[string]*token.File), + }, + } + + // read version info + var versionstr string + if b := p.rawByte(); b == 'c' || b == 'd' { + // Go1.7 encoding; first byte encodes low-level + // encoding format (compact vs debug). + // For backward-compatibility only (avoid problems with + // old installed packages). Newly compiled packages use + // the extensible format string. + // TODO(gri) Remove this support eventually; after Go1.8. + if b == 'd' { + p.debugFormat = true + } + p.trackAllTypes = p.rawByte() == 'a' + p.posInfoFormat = p.int() != 0 + versionstr = p.string() + if versionstr == "v1" { + version = 0 + } + } else { + // Go1.8 extensible encoding + // read version string and extract version number (ignore anything after the version number) + versionstr = p.rawStringln(b) + if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" { + if v, err := strconv.Atoi(s[1]); err == nil && v > 0 { + version = v + } + } + } + p.version = version + + // read version specific flags - extend as necessary + switch p.version { + // case currentVersion: + // ... 
+ // fallthrough + case currentVersion, 5, 4, 3, 2, 1: + p.debugFormat = p.rawStringln(p.rawByte()) == "debug" + p.trackAllTypes = p.int() != 0 + p.posInfoFormat = p.int() != 0 + case 0: + // Go1.7 encoding format - nothing to do here + default: + errorf("unknown bexport format version %d (%q)", p.version, versionstr) + } + + // --- generic export data --- + + // populate typList with predeclared "known" types + p.typList = append(p.typList, predeclared()...) + + // read package data + pkg = p.pkg() + + // read objects of phase 1 only (see cmd/compile/internal/gc/bexport.go) + objcount := 0 + for { + tag := p.tagOrIndex() + if tag == endTag { + break + } + p.obj(tag) + objcount++ + } + + // self-verification + if count := p.int(); count != objcount { + errorf("got %d objects; want %d", objcount, count) + } + + // ignore compiler-specific import data + + // complete interfaces + // TODO(gri) re-investigate if we still need to do this in a delayed fashion + for _, typ := range p.interfaceList { + typ.Complete() + } + + // record all referenced packages as imports + list := append(([]*types.Package)(nil), p.pkgList[1:]...) 
+ sort.Sort(byPath(list)) + pkg.SetImports(list) + + // package was imported completely and without errors + pkg.MarkComplete() + + return p.read, pkg, nil +} + +func errorf(format string, args ...interface{}) { + panic(fmt.Sprintf(format, args...)) +} + +func (p *importer) pkg() *types.Package { + // if the package was seen before, i is its index (>= 0) + i := p.tagOrIndex() + if i >= 0 { + return p.pkgList[i] + } + + // otherwise, i is the package tag (< 0) + if i != packageTag { + errorf("unexpected package tag %d version %d", i, p.version) + } + + // read package data + name := p.string() + var path string + if p.version >= 5 { + path = p.path() + } else { + path = p.string() + } + if p.version >= 6 { + p.int() // package height; unused by go/types + } + + // we should never see an empty package name + if name == "" { + errorf("empty package name in import") + } + + // an empty path denotes the package we are currently importing; + // it must be the first package we see + if (path == "") != (len(p.pkgList) == 0) { + errorf("package path %q for pkg index %d", path, len(p.pkgList)) + } + + // if the package was imported before, use that one; otherwise create a new one + if path == "" { + path = p.importpath + } + pkg := p.imports[path] + if pkg == nil { + pkg = types.NewPackage(path, name) + p.imports[path] = pkg + } else if pkg.Name() != name { + errorf("conflicting names %s and %s for package %q", pkg.Name(), name, path) + } + p.pkgList = append(p.pkgList, pkg) + + return pkg +} + +// objTag returns the tag value for each object kind. 
+func objTag(obj types.Object) int { + switch obj.(type) { + case *types.Const: + return constTag + case *types.TypeName: + return typeTag + case *types.Var: + return varTag + case *types.Func: + return funcTag + default: + errorf("unexpected object: %v (%T)", obj, obj) // panics + panic("unreachable") + } +} + +func sameObj(a, b types.Object) bool { + // Because unnamed types are not canonicalized, we cannot simply compare types for + // (pointer) identity. + // Ideally we'd check equality of constant values as well, but this is good enough. + return objTag(a) == objTag(b) && types.Identical(a.Type(), b.Type()) +} + +func (p *importer) declare(obj types.Object) { + pkg := obj.Pkg() + if alt := pkg.Scope().Insert(obj); alt != nil { + // This can only trigger if we import a (non-type) object a second time. + // Excluding type aliases, this cannot happen because 1) we only import a package + // once; and b) we ignore compiler-specific export data which may contain + // functions whose inlined function bodies refer to other functions that + // were already imported. + // However, type aliases require reexporting the original type, so we need + // to allow it (see also the comment in cmd/compile/internal/gc/bimport.go, + // method importer.obj, switch case importing functions). + // TODO(gri) review/update this comment once the gc compiler handles type aliases. 
+ if !sameObj(obj, alt) { + errorf("inconsistent import:\n\t%v\npreviously imported as:\n\t%v\n", obj, alt) + } + } +} + +func (p *importer) obj(tag int) { + switch tag { + case constTag: + pos := p.pos() + pkg, name := p.qualifiedName() + typ := p.typ(nil, nil) + val := p.value() + p.declare(types.NewConst(pos, pkg, name, typ, val)) + + case aliasTag: + // TODO(gri) verify type alias hookup is correct + pos := p.pos() + pkg, name := p.qualifiedName() + typ := p.typ(nil, nil) + p.declare(types.NewTypeName(pos, pkg, name, typ)) + + case typeTag: + p.typ(nil, nil) + + case varTag: + pos := p.pos() + pkg, name := p.qualifiedName() + typ := p.typ(nil, nil) + p.declare(types.NewVar(pos, pkg, name, typ)) + + case funcTag: + pos := p.pos() + pkg, name := p.qualifiedName() + params, isddd := p.paramList() + result, _ := p.paramList() + sig := types.NewSignature(nil, params, result, isddd) + p.declare(types.NewFunc(pos, pkg, name, sig)) + + default: + errorf("unexpected object tag %d", tag) + } +} + +const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go + +func (p *importer) pos() token.Pos { + if !p.posInfoFormat { + return token.NoPos + } + + file := p.prevFile + line := p.prevLine + delta := p.int() + line += delta + if p.version >= 5 { + if delta == deltaNewFile { + if n := p.int(); n >= 0 { + // file changed + file = p.path() + line = n + } + } + } else { + if delta == 0 { + if n := p.int(); n >= 0 { + // file changed + file = p.prevFile[:n] + p.string() + line = p.int() + } + } + } + p.prevFile = file + p.prevLine = line + + return p.fake.pos(file, line) +} + +// Synthesize a token.Pos +type fakeFileSet struct { + fset *token.FileSet + files map[string]*token.File +} + +func (s *fakeFileSet) pos(file string, line int) token.Pos { + // Since we don't know the set of needed file positions, we + // reserve maxlines positions per file. 
+ const maxlines = 64 * 1024 + f := s.files[file] + if f == nil { + f = s.fset.AddFile(file, -1, maxlines) + s.files[file] = f + // Allocate the fake linebreak indices on first use. + // TODO(adonovan): opt: save ~512KB using a more complex scheme? + fakeLinesOnce.Do(func() { + fakeLines = make([]int, maxlines) + for i := range fakeLines { + fakeLines[i] = i + } + }) + f.SetLines(fakeLines) + } + + if line > maxlines { + line = 1 + } + + // Treat the file as if it contained only newlines + // and column=1: use the line number as the offset. + return f.Pos(line - 1) +} + +var ( + fakeLines []int + fakeLinesOnce sync.Once +) + +func (p *importer) qualifiedName() (pkg *types.Package, name string) { + name = p.string() + pkg = p.pkg() + return +} + +func (p *importer) record(t types.Type) { + p.typList = append(p.typList, t) +} + +// A dddSlice is a types.Type representing ...T parameters. +// It only appears for parameter types and does not escape +// the importer. +type dddSlice struct { + elem types.Type +} + +func (t *dddSlice) Underlying() types.Type { return t } +func (t *dddSlice) String() string { return "..." + t.elem.String() } + +// parent is the package which declared the type; parent == nil means +// the package currently imported. The parent package is needed for +// exported struct fields and interface methods which don't contain +// explicit package information in the export data. +// +// A non-nil tname is used as the "owner" of the result type; i.e., +// the result type is the underlying type of tname. tname is used +// to give interface methods a named receiver type where possible. 
+func (p *importer) typ(parent *types.Package, tname *types.Named) types.Type { + // if the type was seen before, i is its index (>= 0) + i := p.tagOrIndex() + if i >= 0 { + return p.typList[i] + } + + // otherwise, i is the type tag (< 0) + switch i { + case namedTag: + // read type object + pos := p.pos() + parent, name := p.qualifiedName() + scope := parent.Scope() + obj := scope.Lookup(name) + + // if the object doesn't exist yet, create and insert it + if obj == nil { + obj = types.NewTypeName(pos, parent, name, nil) + scope.Insert(obj) + } + + if _, ok := obj.(*types.TypeName); !ok { + errorf("pkg = %s, name = %s => %s", parent, name, obj) + } + + // associate new named type with obj if it doesn't exist yet + t0 := types.NewNamed(obj.(*types.TypeName), nil, nil) + + // but record the existing type, if any + tname := obj.Type().(*types.Named) // tname is either t0 or the existing type + p.record(tname) + + // read underlying type + t0.SetUnderlying(p.typ(parent, t0)) + + // interfaces don't have associated methods + if types.IsInterface(t0) { + return tname + } + + // read associated methods + for i := p.int(); i > 0; i-- { + // TODO(gri) replace this with something closer to fieldName + pos := p.pos() + name := p.string() + if !exported(name) { + p.pkg() + } + + recv, _ := p.paramList() // TODO(gri) do we need a full param list for the receiver? 
+ params, isddd := p.paramList() + result, _ := p.paramList() + p.int() // go:nointerface pragma - discarded + + sig := types.NewSignature(recv.At(0), params, result, isddd) + t0.AddMethod(types.NewFunc(pos, parent, name, sig)) + } + + return tname + + case arrayTag: + t := new(types.Array) + if p.trackAllTypes { + p.record(t) + } + + n := p.int64() + *t = *types.NewArray(p.typ(parent, nil), n) + return t + + case sliceTag: + t := new(types.Slice) + if p.trackAllTypes { + p.record(t) + } + + *t = *types.NewSlice(p.typ(parent, nil)) + return t + + case dddTag: + t := new(dddSlice) + if p.trackAllTypes { + p.record(t) + } + + t.elem = p.typ(parent, nil) + return t + + case structTag: + t := new(types.Struct) + if p.trackAllTypes { + p.record(t) + } + + *t = *types.NewStruct(p.fieldList(parent)) + return t + + case pointerTag: + t := new(types.Pointer) + if p.trackAllTypes { + p.record(t) + } + + *t = *types.NewPointer(p.typ(parent, nil)) + return t + + case signatureTag: + t := new(types.Signature) + if p.trackAllTypes { + p.record(t) + } + + params, isddd := p.paramList() + result, _ := p.paramList() + *t = *types.NewSignature(nil, params, result, isddd) + return t + + case interfaceTag: + // Create a dummy entry in the type list. This is safe because we + // cannot expect the interface type to appear in a cycle, as any + // such cycle must contain a named type which would have been + // first defined earlier. + // TODO(gri) Is this still true now that we have type aliases? + // See issue #23225. 
+ n := len(p.typList) + if p.trackAllTypes { + p.record(nil) + } + + var embeddeds []types.Type + for n := p.int(); n > 0; n-- { + p.pos() + embeddeds = append(embeddeds, p.typ(parent, nil)) + } + + t := newInterface(p.methodList(parent, tname), embeddeds) + p.interfaceList = append(p.interfaceList, t) + if p.trackAllTypes { + p.typList[n] = t + } + return t + + case mapTag: + t := new(types.Map) + if p.trackAllTypes { + p.record(t) + } + + key := p.typ(parent, nil) + val := p.typ(parent, nil) + *t = *types.NewMap(key, val) + return t + + case chanTag: + t := new(types.Chan) + if p.trackAllTypes { + p.record(t) + } + + dir := chanDir(p.int()) + val := p.typ(parent, nil) + *t = *types.NewChan(dir, val) + return t + + default: + errorf("unexpected type tag %d", i) // panics + panic("unreachable") + } +} + +func chanDir(d int) types.ChanDir { + // tag values must match the constants in cmd/compile/internal/gc/go.go + switch d { + case 1 /* Crecv */ : + return types.RecvOnly + case 2 /* Csend */ : + return types.SendOnly + case 3 /* Cboth */ : + return types.SendRecv + default: + errorf("unexpected channel dir %d", d) + return 0 + } +} + +func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) { + if n := p.int(); n > 0 { + fields = make([]*types.Var, n) + tags = make([]string, n) + for i := range fields { + fields[i], tags[i] = p.field(parent) + } + } + return +} + +func (p *importer) field(parent *types.Package) (*types.Var, string) { + pos := p.pos() + pkg, name, alias := p.fieldName(parent) + typ := p.typ(parent, nil) + tag := p.string() + + anonymous := false + if name == "" { + // anonymous field - typ must be T or *T and T must be a type name + switch typ := deref(typ).(type) { + case *types.Basic: // basic types are named types + pkg = nil // // objects defined in Universe scope have no package + name = typ.Name() + case *types.Named: + name = typ.Obj().Name() + default: + errorf("named base type expected") + } + anonymous = 
true + } else if alias { + // anonymous field: we have an explicit name because it's an alias + anonymous = true + } + + return types.NewField(pos, pkg, name, typ, anonymous), tag +} + +func (p *importer) methodList(parent *types.Package, baseType *types.Named) (methods []*types.Func) { + if n := p.int(); n > 0 { + methods = make([]*types.Func, n) + for i := range methods { + methods[i] = p.method(parent, baseType) + } + } + return +} + +func (p *importer) method(parent *types.Package, baseType *types.Named) *types.Func { + pos := p.pos() + pkg, name, _ := p.fieldName(parent) + // If we don't have a baseType, use a nil receiver. + // A receiver using the actual interface type (which + // we don't know yet) will be filled in when we call + // types.Interface.Complete. + var recv *types.Var + if baseType != nil { + recv = types.NewVar(token.NoPos, parent, "", baseType) + } + params, isddd := p.paramList() + result, _ := p.paramList() + sig := types.NewSignature(recv, params, result, isddd) + return types.NewFunc(pos, pkg, name, sig) +} + +func (p *importer) fieldName(parent *types.Package) (pkg *types.Package, name string, alias bool) { + name = p.string() + pkg = parent + if pkg == nil { + // use the imported package instead + pkg = p.pkgList[0] + } + if p.version == 0 && name == "_" { + // version 0 didn't export a package for _ fields + return + } + switch name { + case "": + // 1) field name matches base type name and is exported: nothing to do + case "?": + // 2) field name matches base type name and is not exported: need package + name = "" + pkg = p.pkg() + case "@": + // 3) field name doesn't match type name (alias) + name = p.string() + alias = true + fallthrough + default: + if !exported(name) { + pkg = p.pkg() + } + } + return +} + +func (p *importer) paramList() (*types.Tuple, bool) { + n := p.int() + if n == 0 { + return nil, false + } + // negative length indicates unnamed parameters + named := true + if n < 0 { + n = -n + named = false + } + // n > 0 + 
params := make([]*types.Var, n) + isddd := false + for i := range params { + params[i], isddd = p.param(named) + } + return types.NewTuple(params...), isddd +} + +func (p *importer) param(named bool) (*types.Var, bool) { + t := p.typ(nil, nil) + td, isddd := t.(*dddSlice) + if isddd { + t = types.NewSlice(td.elem) + } + + var pkg *types.Package + var name string + if named { + name = p.string() + if name == "" { + errorf("expected named parameter") + } + if name != "_" { + pkg = p.pkg() + } + if i := strings.Index(name, "·"); i > 0 { + name = name[:i] // cut off gc-specific parameter numbering + } + } + + // read and discard compiler-specific info + p.string() + + return types.NewVar(token.NoPos, pkg, name, t), isddd +} + +func exported(name string) bool { + ch, _ := utf8.DecodeRuneInString(name) + return unicode.IsUpper(ch) +} + +func (p *importer) value() constant.Value { + switch tag := p.tagOrIndex(); tag { + case falseTag: + return constant.MakeBool(false) + case trueTag: + return constant.MakeBool(true) + case int64Tag: + return constant.MakeInt64(p.int64()) + case floatTag: + return p.float() + case complexTag: + re := p.float() + im := p.float() + return constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + case stringTag: + return constant.MakeString(p.string()) + case unknownTag: + return constant.MakeUnknown() + default: + errorf("unexpected value tag %d", tag) // panics + panic("unreachable") + } +} + +func (p *importer) float() constant.Value { + sign := p.int() + if sign == 0 { + return constant.MakeInt64(0) + } + + exp := p.int() + mant := []byte(p.string()) // big endian + + // remove leading 0's if any + for len(mant) > 0 && mant[0] == 0 { + mant = mant[1:] + } + + // convert to little endian + // TODO(gri) go/constant should have a more direct conversion function + // (e.g., once it supports a big.Float based implementation) + for i, j := 0, len(mant)-1; i < j; i, j = i+1, j-1 { + mant[i], mant[j] = mant[j], mant[i] + } + + // adjust exponent 
(constant.MakeFromBytes creates an integer value, + // but mant represents the mantissa bits such that 0.5 <= mant < 1.0) + exp -= len(mant) << 3 + if len(mant) > 0 { + for msd := mant[len(mant)-1]; msd&0x80 == 0; msd <<= 1 { + exp++ + } + } + + x := constant.MakeFromBytes(mant) + switch { + case exp < 0: + d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) + x = constant.BinaryOp(x, token.QUO, d) + case exp > 0: + x = constant.Shift(x, token.SHL, uint(exp)) + } + + if sign < 0 { + x = constant.UnaryOp(token.SUB, x, 0) + } + return x +} + +// ---------------------------------------------------------------------------- +// Low-level decoders + +func (p *importer) tagOrIndex() int { + if p.debugFormat { + p.marker('t') + } + + return int(p.rawInt64()) +} + +func (p *importer) int() int { + x := p.int64() + if int64(int(x)) != x { + errorf("exported integer too large") + } + return int(x) +} + +func (p *importer) int64() int64 { + if p.debugFormat { + p.marker('i') + } + + return p.rawInt64() +} + +func (p *importer) path() string { + if p.debugFormat { + p.marker('p') + } + // if the path was seen before, i is its index (>= 0) + // (the empty string is at index 0) + i := p.rawInt64() + if i >= 0 { + return p.pathList[i] + } + // otherwise, i is the negative path length (< 0) + a := make([]string, -i) + for n := range a { + a[n] = p.string() + } + s := strings.Join(a, "/") + p.pathList = append(p.pathList, s) + return s +} + +func (p *importer) string() string { + if p.debugFormat { + p.marker('s') + } + // if the string was seen before, i is its index (>= 0) + // (the empty string is at index 0) + i := p.rawInt64() + if i >= 0 { + return p.strList[i] + } + // otherwise, i is the negative string length (< 0) + if n := int(-i); n <= cap(p.buf) { + p.buf = p.buf[:n] + } else { + p.buf = make([]byte, n) + } + for i := range p.buf { + p.buf[i] = p.rawByte() + } + s := string(p.buf) + p.strList = append(p.strList, s) + return s +} + +func (p *importer) 
marker(want byte) { + if got := p.rawByte(); got != want { + errorf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read) + } + + pos := p.read + if n := int(p.rawInt64()); n != pos { + errorf("incorrect position: got %d; want %d", n, pos) + } +} + +// rawInt64 should only be used by low-level decoders. +func (p *importer) rawInt64() int64 { + i, err := binary.ReadVarint(p) + if err != nil { + errorf("read error: %v", err) + } + return i +} + +// rawStringln should only be used to read the initial version string. +func (p *importer) rawStringln(b byte) string { + p.buf = p.buf[:0] + for b != '\n' { + p.buf = append(p.buf, b) + b = p.rawByte() + } + return string(p.buf) +} + +// needed for binary.ReadVarint in rawInt64 +func (p *importer) ReadByte() (byte, error) { + return p.rawByte(), nil +} + +// byte is the bottleneck interface for reading p.data. +// It unescapes '|' 'S' to '$' and '|' '|' to '|'. +// rawByte should only be used by low-level decoders. +func (p *importer) rawByte() byte { + b := p.data[0] + r := 1 + if b == '|' { + b = p.data[1] + r = 2 + switch b { + case 'S': + b = '$' + case '|': + // nothing to do + default: + errorf("unexpected escape sequence in export data") + } + } + p.data = p.data[r:] + p.read += r + return b + +} + +// ---------------------------------------------------------------------------- +// Export format + +// Tags. Must be < 0. 
+const ( + // Objects + packageTag = -(iota + 1) + constTag + typeTag + varTag + funcTag + endTag + + // Types + namedTag + arrayTag + sliceTag + dddTag + structTag + pointerTag + signatureTag + interfaceTag + mapTag + chanTag + + // Values + falseTag + trueTag + int64Tag + floatTag + fractionTag // not used by gc + complexTag + stringTag + nilTag // only used by gc (appears in exported inlined function bodies) + unknownTag // not used by gc (only appears in packages with errors) + + // Type aliases + aliasTag +) + +var predecl []types.Type // initialized lazily + +func predeclared() []types.Type { + if predecl == nil { + // initialize lazily to be sure that all + // elements have been initialized before + predecl = []types.Type{ // basic types + types.Typ[types.Bool], + types.Typ[types.Int], + types.Typ[types.Int8], + types.Typ[types.Int16], + types.Typ[types.Int32], + types.Typ[types.Int64], + types.Typ[types.Uint], + types.Typ[types.Uint8], + types.Typ[types.Uint16], + types.Typ[types.Uint32], + types.Typ[types.Uint64], + types.Typ[types.Uintptr], + types.Typ[types.Float32], + types.Typ[types.Float64], + types.Typ[types.Complex64], + types.Typ[types.Complex128], + types.Typ[types.String], + + // basic type aliases + types.Universe.Lookup("byte").Type(), + types.Universe.Lookup("rune").Type(), + + // error + types.Universe.Lookup("error").Type(), + + // untyped types + types.Typ[types.UntypedBool], + types.Typ[types.UntypedInt], + types.Typ[types.UntypedRune], + types.Typ[types.UntypedFloat], + types.Typ[types.UntypedComplex], + types.Typ[types.UntypedString], + types.Typ[types.UntypedNil], + + // package unsafe + types.Typ[types.UnsafePointer], + + // invalid type + types.Typ[types.Invalid], // only appears in packages with errors + + // used internally by gc; never used by this package or in .a files + anyType{}, + } + } + return predecl +} + +type anyType struct{} + +func (t anyType) Underlying() types.Type { return t } +func (t anyType) String() string { 
return "any" } diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go b/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go new file mode 100644 index 0000000000..f33dc5613e --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go @@ -0,0 +1,93 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go. + +// This file implements FindExportData. + +package gcimporter + +import ( + "bufio" + "fmt" + "io" + "strconv" + "strings" +) + +func readGopackHeader(r *bufio.Reader) (name string, size int, err error) { + // See $GOROOT/include/ar.h. + hdr := make([]byte, 16+12+6+6+8+10+2) + _, err = io.ReadFull(r, hdr) + if err != nil { + return + } + // leave for debugging + if false { + fmt.Printf("header: %s", hdr) + } + s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10])) + size, err = strconv.Atoi(s) + if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' { + err = fmt.Errorf("invalid archive header") + return + } + name = strings.TrimSpace(string(hdr[:16])) + return +} + +// FindExportData positions the reader r at the beginning of the +// export data section of an underlying GC-created object/archive +// file by reading from it. The reader must be positioned at the +// start of the file before calling this function. The hdr result +// is the string before the export data, either "$$" or "$$B". +// +func FindExportData(r *bufio.Reader) (hdr string, err error) { + // Read first line to make sure this is an object file. + line, err := r.ReadSlice('\n') + if err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + + if string(line) == "!\n" { + // Archive file. Scan to __.PKGDEF. 
+ var name string + if name, _, err = readGopackHeader(r); err != nil { + return + } + + // First entry should be __.PKGDEF. + if name != "__.PKGDEF" { + err = fmt.Errorf("go archive is missing __.PKGDEF") + return + } + + // Read first line of __.PKGDEF data, so that line + // is once again the first line of the input. + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + } + + // Now at __.PKGDEF in archive or still at beginning of file. + // Either way, line should begin with "go object ". + if !strings.HasPrefix(string(line), "go object ") { + err = fmt.Errorf("not a Go object file") + return + } + + // Skip over object header to export data. + // Begins after first line starting with $$. + for line[0] != '$' { + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + } + hdr = string(line) + + return +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go new file mode 100644 index 0000000000..9cf186605f --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go @@ -0,0 +1,1078 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a modified copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go, +// but it also contains the original source-based importer code for Go1.6. +// Once we stop supporting 1.6, we can remove that code. + +// Package gcimporter provides various functions for reading +// gc-generated object files that can be used to implement the +// Importer interface defined by the Go 1.5 standard library package. 
+package gcimporter // import "golang.org/x/tools/go/internal/gcimporter" + +import ( + "bufio" + "errors" + "fmt" + "go/build" + "go/constant" + "go/token" + "go/types" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "text/scanner" +) + +// debugging/development support +const debug = false + +var pkgExts = [...]string{".a", ".o"} + +// FindPkg returns the filename and unique package id for an import +// path based on package information provided by build.Import (using +// the build.Default build.Context). A relative srcDir is interpreted +// relative to the current working directory. +// If no file was found, an empty filename is returned. +// +func FindPkg(path, srcDir string) (filename, id string) { + if path == "" { + return + } + + var noext string + switch { + default: + // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" + // Don't require the source files to be present. + if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 + srcDir = abs + } + bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary) + if bp.PkgObj == "" { + id = path // make sure we have an id to print in error message + return + } + noext = strings.TrimSuffix(bp.PkgObj, ".a") + id = bp.ImportPath + + case build.IsLocalImport(path): + // "./x" -> "/this/directory/x.ext", "/this/directory/x" + noext = filepath.Join(srcDir, path) + id = noext + + case filepath.IsAbs(path): + // for completeness only - go/build.Import + // does not support absolute imports + // "/x" -> "/x.ext", "/x" + noext = path + id = path + } + + if false { // for debugging + if path != id { + fmt.Printf("%s -> %s\n", path, id) + } + } + + // try extensions + for _, ext := range pkgExts { + filename = noext + ext + if f, err := os.Stat(filename); err == nil && !f.IsDir() { + return + } + } + + filename = "" // not found + return +} + +// ImportData imports a package by reading the gc-generated export data, +// adds the corresponding package object to the 
packages map indexed by id, +// and returns the object. +// +// The packages map must contains all packages already imported. The data +// reader position must be the beginning of the export data section. The +// filename is only used in error messages. +// +// If packages[id] contains the completely imported package, that package +// can be used directly, and there is no need to call this function (but +// there is also no harm but for extra time used). +// +func ImportData(packages map[string]*types.Package, filename, id string, data io.Reader) (pkg *types.Package, err error) { + // support for parser error handling + defer func() { + switch r := recover().(type) { + case nil: + // nothing to do + case importError: + err = r + default: + panic(r) // internal error + } + }() + + var p parser + p.init(filename, id, data, packages) + pkg = p.parseExport() + + return +} + +// Import imports a gc-generated package given its import path and srcDir, adds +// the corresponding package object to the packages map, and returns the object. +// The packages map must contain all packages already imported. +// +func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { + var rc io.ReadCloser + var filename, id string + if lookup != nil { + // With custom lookup specified, assume that caller has + // converted path to a canonical import path for use in the map. + if path == "unsafe" { + return types.Unsafe, nil + } + id = path + + // No need to re-import if the package was imported completely before. 
+ if pkg = packages[id]; pkg != nil && pkg.Complete() { + return + } + f, err := lookup(path) + if err != nil { + return nil, err + } + rc = f + } else { + filename, id = FindPkg(path, srcDir) + if filename == "" { + if path == "unsafe" { + return types.Unsafe, nil + } + return nil, fmt.Errorf("can't find import: %q", id) + } + + // no need to re-import if the package was imported completely before + if pkg = packages[id]; pkg != nil && pkg.Complete() { + return + } + + // open file + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + // add file name to error + err = fmt.Errorf("%s: %v", filename, err) + } + }() + rc = f + } + defer rc.Close() + + var hdr string + buf := bufio.NewReader(rc) + if hdr, err = FindExportData(buf); err != nil { + return + } + + switch hdr { + case "$$\n": + // Work-around if we don't have a filename; happens only if lookup != nil. + // Either way, the filename is only needed for importer error messages, so + // this is fine. + if filename == "" { + filename = path + } + return ImportData(packages, filename, id, buf) + + case "$$B\n": + var data []byte + data, err = ioutil.ReadAll(buf) + if err != nil { + break + } + + // TODO(gri): allow clients of go/importer to provide a FileSet. + // Or, define a new standard go/types/gcexportdata package. + fset := token.NewFileSet() + + // The indexed export format starts with an 'i'; the older + // binary export format starts with a 'c', 'd', or 'v' + // (from "version"). Select appropriate importer. + if len(data) > 0 && data[0] == 'i' { + _, pkg, err = IImportData(fset, packages, data[1:], id) + } else { + _, pkg, err = BImportData(fset, packages, data, id) + } + + default: + err = fmt.Errorf("unknown export data header: %q", hdr) + } + + return +} + +// ---------------------------------------------------------------------------- +// Parser + +// TODO(gri) Imported objects don't have position information. 
+// Ideally use the debug table line info; alternatively +// create some fake position (or the position of the +// import). That way error messages referring to imported +// objects can print meaningful information. + +// parser parses the exports inside a gc compiler-produced +// object/archive file and populates its scope with the results. +type parser struct { + scanner scanner.Scanner + tok rune // current token + lit string // literal string; only valid for Ident, Int, String tokens + id string // package id of imported package + sharedPkgs map[string]*types.Package // package id -> package object (across importer) + localPkgs map[string]*types.Package // package id -> package object (just this package) +} + +func (p *parser) init(filename, id string, src io.Reader, packages map[string]*types.Package) { + p.scanner.Init(src) + p.scanner.Error = func(_ *scanner.Scanner, msg string) { p.error(msg) } + p.scanner.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanChars | scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments + p.scanner.Whitespace = 1<<'\t' | 1<<' ' + p.scanner.Filename = filename // for good error messages + p.next() + p.id = id + p.sharedPkgs = packages + if debug { + // check consistency of packages map + for _, pkg := range packages { + if pkg.Name() == "" { + fmt.Printf("no package name for %s\n", pkg.Path()) + } + } + } +} + +func (p *parser) next() { + p.tok = p.scanner.Scan() + switch p.tok { + case scanner.Ident, scanner.Int, scanner.Char, scanner.String, '·': + p.lit = p.scanner.TokenText() + default: + p.lit = "" + } + if debug { + fmt.Printf("%s: %q -> %q\n", scanner.TokenString(p.tok), p.scanner.TokenText(), p.lit) + } +} + +func declTypeName(pkg *types.Package, name string) *types.TypeName { + scope := pkg.Scope() + if obj := scope.Lookup(name); obj != nil { + return obj.(*types.TypeName) + } + obj := types.NewTypeName(token.NoPos, pkg, name, nil) + // a named type may be referred to before the underlying type + // 
is known - set it up + types.NewNamed(obj, nil, nil) + scope.Insert(obj) + return obj +} + +// ---------------------------------------------------------------------------- +// Error handling + +// Internal errors are boxed as importErrors. +type importError struct { + pos scanner.Position + err error +} + +func (e importError) Error() string { + return fmt.Sprintf("import error %s (byte offset = %d): %s", e.pos, e.pos.Offset, e.err) +} + +func (p *parser) error(err interface{}) { + if s, ok := err.(string); ok { + err = errors.New(s) + } + // panic with a runtime.Error if err is not an error + panic(importError{p.scanner.Pos(), err.(error)}) +} + +func (p *parser) errorf(format string, args ...interface{}) { + p.error(fmt.Sprintf(format, args...)) +} + +func (p *parser) expect(tok rune) string { + lit := p.lit + if p.tok != tok { + p.errorf("expected %s, got %s (%s)", scanner.TokenString(tok), scanner.TokenString(p.tok), lit) + } + p.next() + return lit +} + +func (p *parser) expectSpecial(tok string) { + sep := 'x' // not white space + i := 0 + for i < len(tok) && p.tok == rune(tok[i]) && sep > ' ' { + sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token + p.next() + i++ + } + if i < len(tok) { + p.errorf("expected %q, got %q", tok, tok[0:i]) + } +} + +func (p *parser) expectKeyword(keyword string) { + lit := p.expect(scanner.Ident) + if lit != keyword { + p.errorf("expected keyword %s, got %q", keyword, lit) + } +} + +// ---------------------------------------------------------------------------- +// Qualified and unqualified names + +// PackageId = string_lit . +// +func (p *parser) parsePackageId() string { + id, err := strconv.Unquote(p.expect(scanner.String)) + if err != nil { + p.error(err) + } + // id == "" stands for the imported package id + // (only known at time of package installation) + if id == "" { + id = p.id + } + return id +} + +// PackageName = ident . 
+// +func (p *parser) parsePackageName() string { + return p.expect(scanner.Ident) +} + +// dotIdentifier = ( ident | '·' ) { ident | int | '·' } . +func (p *parser) parseDotIdent() string { + ident := "" + if p.tok != scanner.Int { + sep := 'x' // not white space + for (p.tok == scanner.Ident || p.tok == scanner.Int || p.tok == '·') && sep > ' ' { + ident += p.lit + sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token + p.next() + } + } + if ident == "" { + p.expect(scanner.Ident) // use expect() for error handling + } + return ident +} + +// QualifiedName = "@" PackageId "." ( "?" | dotIdentifier ) . +// +func (p *parser) parseQualifiedName() (id, name string) { + p.expect('@') + id = p.parsePackageId() + p.expect('.') + // Per rev f280b8a485fd (10/2/2013), qualified names may be used for anonymous fields. + if p.tok == '?' { + p.next() + } else { + name = p.parseDotIdent() + } + return +} + +// getPkg returns the package for a given id. If the package is +// not found, create the package and add it to the p.localPkgs +// and p.sharedPkgs maps. name is the (expected) name of the +// package. If name == "", the package name is expected to be +// set later via an import clause in the export data. +// +// id identifies a package, usually by a canonical package path like +// "encoding/json" but possibly by a non-canonical import path like +// "./json". 
+// +func (p *parser) getPkg(id, name string) *types.Package { + // package unsafe is not in the packages maps - handle explicitly + if id == "unsafe" { + return types.Unsafe + } + + pkg := p.localPkgs[id] + if pkg == nil { + // first import of id from this package + pkg = p.sharedPkgs[id] + if pkg == nil { + // first import of id by this importer; + // add (possibly unnamed) pkg to shared packages + pkg = types.NewPackage(id, name) + p.sharedPkgs[id] = pkg + } + // add (possibly unnamed) pkg to local packages + if p.localPkgs == nil { + p.localPkgs = make(map[string]*types.Package) + } + p.localPkgs[id] = pkg + } else if name != "" { + // package exists already and we have an expected package name; + // make sure names match or set package name if necessary + if pname := pkg.Name(); pname == "" { + pkg.SetName(name) + } else if pname != name { + p.errorf("%s package name mismatch: %s (given) vs %s (expected)", id, pname, name) + } + } + return pkg +} + +// parseExportedName is like parseQualifiedName, but +// the package id is resolved to an imported *types.Package. +// +func (p *parser) parseExportedName() (pkg *types.Package, name string) { + id, name := p.parseQualifiedName() + pkg = p.getPkg(id, "") + return +} + +// ---------------------------------------------------------------------------- +// Types + +// BasicType = identifier . +// +func (p *parser) parseBasicType() types.Type { + id := p.expect(scanner.Ident) + obj := types.Universe.Lookup(id) + if obj, ok := obj.(*types.TypeName); ok { + return obj.Type() + } + p.errorf("not a basic type: %s", id) + return nil +} + +// ArrayType = "[" int_lit "]" Type . 
+// +func (p *parser) parseArrayType(parent *types.Package) types.Type { + // "[" already consumed and lookahead known not to be "]" + lit := p.expect(scanner.Int) + p.expect(']') + elem := p.parseType(parent) + n, err := strconv.ParseInt(lit, 10, 64) + if err != nil { + p.error(err) + } + return types.NewArray(elem, n) +} + +// MapType = "map" "[" Type "]" Type . +// +func (p *parser) parseMapType(parent *types.Package) types.Type { + p.expectKeyword("map") + p.expect('[') + key := p.parseType(parent) + p.expect(']') + elem := p.parseType(parent) + return types.NewMap(key, elem) +} + +// Name = identifier | "?" | QualifiedName . +// +// For unqualified and anonymous names, the returned package is the parent +// package unless parent == nil, in which case the returned package is the +// package being imported. (The parent package is not nil if the the name +// is an unqualified struct field or interface method name belonging to a +// type declared in another package.) +// +// For qualified names, the returned package is nil (and not created if +// it doesn't exist yet) unless materializePkg is set (which creates an +// unnamed package with valid package path). In the latter case, a +// subsequent import clause is expected to provide a name for the package. +// +func (p *parser) parseName(parent *types.Package, materializePkg bool) (pkg *types.Package, name string) { + pkg = parent + if pkg == nil { + pkg = p.sharedPkgs[p.id] + } + switch p.tok { + case scanner.Ident: + name = p.lit + p.next() + case '?': + // anonymous + p.next() + case '@': + // exported name prefixed with package path + pkg = nil + var id string + id, name = p.parseQualifiedName() + if materializePkg { + pkg = p.getPkg(id, "") + } + default: + p.error("name expected") + } + return +} + +func deref(typ types.Type) types.Type { + if p, _ := typ.(*types.Pointer); p != nil { + return p.Elem() + } + return typ +} + +// Field = Name Type [ string_lit ] . 
+// +func (p *parser) parseField(parent *types.Package) (*types.Var, string) { + pkg, name := p.parseName(parent, true) + + if name == "_" { + // Blank fields should be package-qualified because they + // are unexported identifiers, but gc does not qualify them. + // Assuming that the ident belongs to the current package + // causes types to change during re-exporting, leading + // to spurious "can't assign A to B" errors from go/types. + // As a workaround, pretend all blank fields belong + // to the same unique dummy package. + const blankpkg = "<_>" + pkg = p.getPkg(blankpkg, blankpkg) + } + + typ := p.parseType(parent) + anonymous := false + if name == "" { + // anonymous field - typ must be T or *T and T must be a type name + switch typ := deref(typ).(type) { + case *types.Basic: // basic types are named types + pkg = nil // objects defined in Universe scope have no package + name = typ.Name() + case *types.Named: + name = typ.Obj().Name() + default: + p.errorf("anonymous field expected") + } + anonymous = true + } + tag := "" + if p.tok == scanner.String { + s := p.expect(scanner.String) + var err error + tag, err = strconv.Unquote(s) + if err != nil { + p.errorf("invalid struct tag %s: %s", s, err) + } + } + return types.NewField(token.NoPos, pkg, name, typ, anonymous), tag +} + +// StructType = "struct" "{" [ FieldList ] "}" . +// FieldList = Field { ";" Field } . +// +func (p *parser) parseStructType(parent *types.Package) types.Type { + var fields []*types.Var + var tags []string + + p.expectKeyword("struct") + p.expect('{') + for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ { + if i > 0 { + p.expect(';') + } + fld, tag := p.parseField(parent) + if tag != "" && tags == nil { + tags = make([]string, i) + } + if tags != nil { + tags = append(tags, tag) + } + fields = append(fields, fld) + } + p.expect('}') + + return types.NewStruct(fields, tags) +} + +// Parameter = ( identifier | "?" ) [ "..." ] Type [ string_lit ] . 
+// +func (p *parser) parseParameter() (par *types.Var, isVariadic bool) { + _, name := p.parseName(nil, false) + // remove gc-specific parameter numbering + if i := strings.Index(name, "·"); i >= 0 { + name = name[:i] + } + if p.tok == '.' { + p.expectSpecial("...") + isVariadic = true + } + typ := p.parseType(nil) + if isVariadic { + typ = types.NewSlice(typ) + } + // ignore argument tag (e.g. "noescape") + if p.tok == scanner.String { + p.next() + } + // TODO(gri) should we provide a package? + par = types.NewVar(token.NoPos, nil, name, typ) + return +} + +// Parameters = "(" [ ParameterList ] ")" . +// ParameterList = { Parameter "," } Parameter . +// +func (p *parser) parseParameters() (list []*types.Var, isVariadic bool) { + p.expect('(') + for p.tok != ')' && p.tok != scanner.EOF { + if len(list) > 0 { + p.expect(',') + } + par, variadic := p.parseParameter() + list = append(list, par) + if variadic { + if isVariadic { + p.error("... not on final argument") + } + isVariadic = true + } + } + p.expect(')') + + return +} + +// Signature = Parameters [ Result ] . +// Result = Type | Parameters . +// +func (p *parser) parseSignature(recv *types.Var) *types.Signature { + params, isVariadic := p.parseParameters() + + // optional result type + var results []*types.Var + if p.tok == '(' { + var variadic bool + results, variadic = p.parseParameters() + if variadic { + p.error("... not permitted on result type") + } + } + + return types.NewSignature(recv, types.NewTuple(params...), types.NewTuple(results...), isVariadic) +} + +// InterfaceType = "interface" "{" [ MethodList ] "}" . +// MethodList = Method { ";" Method } . +// Method = Name Signature . +// +// The methods of embedded interfaces are always "inlined" +// by the compiler and thus embedded interfaces are never +// visible in the export data. 
+// +func (p *parser) parseInterfaceType(parent *types.Package) types.Type { + var methods []*types.Func + + p.expectKeyword("interface") + p.expect('{') + for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ { + if i > 0 { + p.expect(';') + } + pkg, name := p.parseName(parent, true) + sig := p.parseSignature(nil) + methods = append(methods, types.NewFunc(token.NoPos, pkg, name, sig)) + } + p.expect('}') + + // Complete requires the type's embedded interfaces to be fully defined, + // but we do not define any + return types.NewInterface(methods, nil).Complete() +} + +// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type . +// +func (p *parser) parseChanType(parent *types.Package) types.Type { + dir := types.SendRecv + if p.tok == scanner.Ident { + p.expectKeyword("chan") + if p.tok == '<' { + p.expectSpecial("<-") + dir = types.SendOnly + } + } else { + p.expectSpecial("<-") + p.expectKeyword("chan") + dir = types.RecvOnly + } + elem := p.parseType(parent) + return types.NewChan(dir, elem) +} + +// Type = +// BasicType | TypeName | ArrayType | SliceType | StructType | +// PointerType | FuncType | InterfaceType | MapType | ChanType | +// "(" Type ")" . +// +// BasicType = ident . +// TypeName = ExportedName . +// SliceType = "[" "]" Type . +// PointerType = "*" Type . +// FuncType = "func" Signature . 
+// +func (p *parser) parseType(parent *types.Package) types.Type { + switch p.tok { + case scanner.Ident: + switch p.lit { + default: + return p.parseBasicType() + case "struct": + return p.parseStructType(parent) + case "func": + // FuncType + p.next() + return p.parseSignature(nil) + case "interface": + return p.parseInterfaceType(parent) + case "map": + return p.parseMapType(parent) + case "chan": + return p.parseChanType(parent) + } + case '@': + // TypeName + pkg, name := p.parseExportedName() + return declTypeName(pkg, name).Type() + case '[': + p.next() // look ahead + if p.tok == ']' { + // SliceType + p.next() + return types.NewSlice(p.parseType(parent)) + } + return p.parseArrayType(parent) + case '*': + // PointerType + p.next() + return types.NewPointer(p.parseType(parent)) + case '<': + return p.parseChanType(parent) + case '(': + // "(" Type ")" + p.next() + typ := p.parseType(parent) + p.expect(')') + return typ + } + p.errorf("expected type, got %s (%q)", scanner.TokenString(p.tok), p.lit) + return nil +} + +// ---------------------------------------------------------------------------- +// Declarations + +// ImportDecl = "import" PackageName PackageId . +// +func (p *parser) parseImportDecl() { + p.expectKeyword("import") + name := p.parsePackageName() + p.getPkg(p.parsePackageId(), name) +} + +// int_lit = [ "+" | "-" ] { "0" ... "9" } . +// +func (p *parser) parseInt() string { + s := "" + switch p.tok { + case '-': + s = "-" + p.next() + case '+': + p.next() + } + return s + p.expect(scanner.Int) +} + +// number = int_lit [ "p" int_lit ] . 
+// +func (p *parser) parseNumber() (typ *types.Basic, val constant.Value) { + // mantissa + mant := constant.MakeFromLiteral(p.parseInt(), token.INT, 0) + if mant == nil { + panic("invalid mantissa") + } + + if p.lit == "p" { + // exponent (base 2) + p.next() + exp, err := strconv.ParseInt(p.parseInt(), 10, 0) + if err != nil { + p.error(err) + } + if exp < 0 { + denom := constant.MakeInt64(1) + denom = constant.Shift(denom, token.SHL, uint(-exp)) + typ = types.Typ[types.UntypedFloat] + val = constant.BinaryOp(mant, token.QUO, denom) + return + } + if exp > 0 { + mant = constant.Shift(mant, token.SHL, uint(exp)) + } + typ = types.Typ[types.UntypedFloat] + val = mant + return + } + + typ = types.Typ[types.UntypedInt] + val = mant + return +} + +// ConstDecl = "const" ExportedName [ Type ] "=" Literal . +// Literal = bool_lit | int_lit | float_lit | complex_lit | rune_lit | string_lit . +// bool_lit = "true" | "false" . +// complex_lit = "(" float_lit "+" float_lit "i" ")" . +// rune_lit = "(" int_lit "+" int_lit ")" . +// string_lit = `"` { unicode_char } `"` . 
+// +func (p *parser) parseConstDecl() { + p.expectKeyword("const") + pkg, name := p.parseExportedName() + + var typ0 types.Type + if p.tok != '=' { + // constant types are never structured - no need for parent type + typ0 = p.parseType(nil) + } + + p.expect('=') + var typ types.Type + var val constant.Value + switch p.tok { + case scanner.Ident: + // bool_lit + if p.lit != "true" && p.lit != "false" { + p.error("expected true or false") + } + typ = types.Typ[types.UntypedBool] + val = constant.MakeBool(p.lit == "true") + p.next() + + case '-', scanner.Int: + // int_lit + typ, val = p.parseNumber() + + case '(': + // complex_lit or rune_lit + p.next() + if p.tok == scanner.Char { + p.next() + p.expect('+') + typ = types.Typ[types.UntypedRune] + _, val = p.parseNumber() + p.expect(')') + break + } + _, re := p.parseNumber() + p.expect('+') + _, im := p.parseNumber() + p.expectKeyword("i") + p.expect(')') + typ = types.Typ[types.UntypedComplex] + val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + + case scanner.Char: + // rune_lit + typ = types.Typ[types.UntypedRune] + val = constant.MakeFromLiteral(p.lit, token.CHAR, 0) + p.next() + + case scanner.String: + // string_lit + typ = types.Typ[types.UntypedString] + val = constant.MakeFromLiteral(p.lit, token.STRING, 0) + p.next() + + default: + p.errorf("expected literal got %s", scanner.TokenString(p.tok)) + } + + if typ0 == nil { + typ0 = typ + } + + pkg.Scope().Insert(types.NewConst(token.NoPos, pkg, name, typ0, val)) +} + +// TypeDecl = "type" ExportedName Type . +// +func (p *parser) parseTypeDecl() { + p.expectKeyword("type") + pkg, name := p.parseExportedName() + obj := declTypeName(pkg, name) + + // The type object may have been imported before and thus already + // have a type associated with it. We still need to parse the type + // structure, but throw it away if the object already has a type. + // This ensures that all imports refer to the same type object for + // a given type declaration. 
+ typ := p.parseType(pkg) + + if name := obj.Type().(*types.Named); name.Underlying() == nil { + name.SetUnderlying(typ) + } +} + +// VarDecl = "var" ExportedName Type . +// +func (p *parser) parseVarDecl() { + p.expectKeyword("var") + pkg, name := p.parseExportedName() + typ := p.parseType(pkg) + pkg.Scope().Insert(types.NewVar(token.NoPos, pkg, name, typ)) +} + +// Func = Signature [ Body ] . +// Body = "{" ... "}" . +// +func (p *parser) parseFunc(recv *types.Var) *types.Signature { + sig := p.parseSignature(recv) + if p.tok == '{' { + p.next() + for i := 1; i > 0; p.next() { + switch p.tok { + case '{': + i++ + case '}': + i-- + } + } + } + return sig +} + +// MethodDecl = "func" Receiver Name Func . +// Receiver = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" . +// +func (p *parser) parseMethodDecl() { + // "func" already consumed + p.expect('(') + recv, _ := p.parseParameter() // receiver + p.expect(')') + + // determine receiver base type object + base := deref(recv.Type()).(*types.Named) + + // parse method name, signature, and possibly inlined body + _, name := p.parseName(nil, false) + sig := p.parseFunc(recv) + + // methods always belong to the same package as the base type object + pkg := base.Obj().Pkg() + + // add method to type unless type was imported before + // and method exists already + // TODO(gri) This leads to a quadratic algorithm - ok for now because method counts are small. + base.AddMethod(types.NewFunc(token.NoPos, pkg, name, sig)) +} + +// FuncDecl = "func" ExportedName Func . +// +func (p *parser) parseFuncDecl() { + // "func" already consumed + pkg, name := p.parseExportedName() + typ := p.parseFunc(nil) + pkg.Scope().Insert(types.NewFunc(token.NoPos, pkg, name, typ)) +} + +// Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" . 
+// +func (p *parser) parseDecl() { + if p.tok == scanner.Ident { + switch p.lit { + case "import": + p.parseImportDecl() + case "const": + p.parseConstDecl() + case "type": + p.parseTypeDecl() + case "var": + p.parseVarDecl() + case "func": + p.next() // look ahead + if p.tok == '(' { + p.parseMethodDecl() + } else { + p.parseFuncDecl() + } + } + } + p.expect('\n') +} + +// ---------------------------------------------------------------------------- +// Export + +// Export = "PackageClause { Decl } "$$" . +// PackageClause = "package" PackageName [ "safe" ] "\n" . +// +func (p *parser) parseExport() *types.Package { + p.expectKeyword("package") + name := p.parsePackageName() + if p.tok == scanner.Ident && p.lit == "safe" { + // package was compiled with -u option - ignore + p.next() + } + p.expect('\n') + + pkg := p.getPkg(p.id, name) + + for p.tok != '$' && p.tok != scanner.EOF { + p.parseDecl() + } + + if ch := p.scanner.Peek(); p.tok != '$' || ch != '$' { + // don't call next()/expect() since reading past the + // export data may cause scanner errors (e.g. NUL chars) + p.errorf("expected '$$', got %s %c", scanner.TokenString(p.tok), ch) + } + + if n := p.scanner.ErrorCount; n != 0 { + p.errorf("expected no scanner errors, got %d", n) + } + + // Record all locally referenced packages as imports. 
+ var imports []*types.Package + for id, pkg2 := range p.localPkgs { + if pkg2.Name() == "" { + p.errorf("%s package has no name", id) + } + if id == p.id { + continue // avoid self-edge + } + imports = append(imports, pkg2) + } + sort.Sort(byPath(imports)) + pkg.SetImports(imports) + + // package was imported completely and without errors + pkg.MarkComplete() + + return pkg +} + +type byPath []*types.Package + +func (a byPath) Len() int { return len(a) } +func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go new file mode 100644 index 0000000000..be671c79b7 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go @@ -0,0 +1,723 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Indexed binary package export. +// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go; +// see that file for specification of the format. + +// +build go1.11 + +package gcimporter + +import ( + "bytes" + "encoding/binary" + "go/ast" + "go/constant" + "go/token" + "go/types" + "io" + "math/big" + "reflect" + "sort" +) + +// Current indexed export format version. Increase with each format change. +// 0: Go1.11 encoding +const iexportVersion = 0 + +// IExportData returns the binary export data for pkg. +// If no file set is provided, position info will be missing. +func IExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) { + defer func() { + if e := recover(); e != nil { + if ierr, ok := e.(internalError); ok { + err = ierr + return + } + // Not an internal error; panic again. 
+ panic(e) + } + }() + + p := iexporter{ + out: bytes.NewBuffer(nil), + fset: fset, + allPkgs: map[*types.Package]bool{}, + stringIndex: map[string]uint64{}, + declIndex: map[types.Object]uint64{}, + typIndex: map[types.Type]uint64{}, + } + + for i, pt := range predeclared() { + p.typIndex[pt] = uint64(i) + } + if len(p.typIndex) > predeclReserved { + panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved)) + } + + // Initialize work queue with exported declarations. + scope := pkg.Scope() + for _, name := range scope.Names() { + if ast.IsExported(name) { + p.pushDecl(scope.Lookup(name)) + } + } + + // Loop until no more work. + for !p.declTodo.empty() { + p.doDecl(p.declTodo.popHead()) + } + + // Append indices to data0 section. + dataLen := uint64(p.data0.Len()) + w := p.newWriter() + w.writeIndex(p.declIndex, pkg) + w.flush() + + // Assemble header. + var hdr intWriter + hdr.WriteByte('i') + hdr.uint64(iexportVersion) + hdr.uint64(uint64(p.strings.Len())) + hdr.uint64(dataLen) + + // Flush output. + io.Copy(p.out, &hdr) + io.Copy(p.out, &p.strings) + io.Copy(p.out, &p.data0) + + return p.out.Bytes(), nil +} + +// writeIndex writes out an object index. mainIndex indicates whether +// we're writing out the main index, which is also read by +// non-compiler tools and includes a complete package description +// (i.e., name and height). +func (w *exportWriter) writeIndex(index map[types.Object]uint64, localpkg *types.Package) { + // Build a map from packages to objects from that package. + pkgObjs := map[*types.Package][]types.Object{} + + // For the main index, make sure to include every package that + // we reference, even if we're not exporting (or reexporting) + // any symbols from it. 
+ pkgObjs[localpkg] = nil + for pkg := range w.p.allPkgs { + pkgObjs[pkg] = nil + } + + for obj := range index { + pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], obj) + } + + var pkgs []*types.Package + for pkg, objs := range pkgObjs { + pkgs = append(pkgs, pkg) + + sort.Slice(objs, func(i, j int) bool { + return objs[i].Name() < objs[j].Name() + }) + } + + sort.Slice(pkgs, func(i, j int) bool { + return pkgs[i].Path() < pkgs[j].Path() + }) + + w.uint64(uint64(len(pkgs))) + for _, pkg := range pkgs { + w.string(pkg.Path()) + w.string(pkg.Name()) + w.uint64(uint64(0)) // package height is not needed for go/types + + objs := pkgObjs[pkg] + w.uint64(uint64(len(objs))) + for _, obj := range objs { + w.string(obj.Name()) + w.uint64(index[obj]) + } + } +} + +type iexporter struct { + fset *token.FileSet + out *bytes.Buffer + + // allPkgs tracks all packages that have been referenced by + // the export data, so we can ensure to include them in the + // main index. + allPkgs map[*types.Package]bool + + declTodo objQueue + + strings intWriter + stringIndex map[string]uint64 + + data0 intWriter + declIndex map[types.Object]uint64 + typIndex map[types.Type]uint64 +} + +// stringOff returns the offset of s within the string section. +// If not already present, it's added to the end. +func (p *iexporter) stringOff(s string) uint64 { + off, ok := p.stringIndex[s] + if !ok { + off = uint64(p.strings.Len()) + p.stringIndex[s] = off + + p.strings.uint64(uint64(len(s))) + p.strings.WriteString(s) + } + return off +} + +// pushDecl adds n to the declaration work queue, if not already present. +func (p *iexporter) pushDecl(obj types.Object) { + // Package unsafe is known to the compiler and predeclared. + assert(obj.Pkg() != types.Unsafe) + + if _, ok := p.declIndex[obj]; ok { + return + } + + p.declIndex[obj] = ^uint64(0) // mark n present in work queue + p.declTodo.pushTail(obj) +} + +// exportWriter handles writing out individual data section chunks. 
+type exportWriter struct { + p *iexporter + + data intWriter + currPkg *types.Package + prevFile string + prevLine int64 +} + +func (p *iexporter) doDecl(obj types.Object) { + w := p.newWriter() + w.setPkg(obj.Pkg(), false) + + switch obj := obj.(type) { + case *types.Var: + w.tag('V') + w.pos(obj.Pos()) + w.typ(obj.Type(), obj.Pkg()) + + case *types.Func: + sig, _ := obj.Type().(*types.Signature) + if sig.Recv() != nil { + panic(internalErrorf("unexpected method: %v", sig)) + } + w.tag('F') + w.pos(obj.Pos()) + w.signature(sig) + + case *types.Const: + w.tag('C') + w.pos(obj.Pos()) + w.value(obj.Type(), obj.Val()) + + case *types.TypeName: + if obj.IsAlias() { + w.tag('A') + w.pos(obj.Pos()) + w.typ(obj.Type(), obj.Pkg()) + break + } + + // Defined type. + w.tag('T') + w.pos(obj.Pos()) + + underlying := obj.Type().Underlying() + w.typ(underlying, obj.Pkg()) + + t := obj.Type() + if types.IsInterface(t) { + break + } + + named, ok := t.(*types.Named) + if !ok { + panic(internalErrorf("%s is not a defined type", t)) + } + + n := named.NumMethods() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + m := named.Method(i) + w.pos(m.Pos()) + w.string(m.Name()) + sig, _ := m.Type().(*types.Signature) + w.param(sig.Recv()) + w.signature(sig) + } + + default: + panic(internalErrorf("unexpected object: %v", obj)) + } + + p.declIndex[obj] = w.flush() +} + +func (w *exportWriter) tag(tag byte) { + w.data.WriteByte(tag) +} + +func (w *exportWriter) pos(pos token.Pos) { + p := w.p.fset.Position(pos) + file := p.Filename + line := int64(p.Line) + + // When file is the same as the last position (common case), + // we can save a few bytes by delta encoding just the line + // number. + // + // Note: Because data objects may be read out of order (or not + // at all), we can only apply delta encoding within a single + // object. This is handled implicitly by tracking prevFile and + // prevLine as fields of exportWriter. 
+ + if file == w.prevFile { + delta := line - w.prevLine + w.int64(delta) + if delta == deltaNewFile { + w.int64(-1) + } + } else { + w.int64(deltaNewFile) + w.int64(line) // line >= 0 + w.string(file) + w.prevFile = file + } + w.prevLine = line +} + +func (w *exportWriter) pkg(pkg *types.Package) { + // Ensure any referenced packages are declared in the main index. + w.p.allPkgs[pkg] = true + + w.string(pkg.Path()) +} + +func (w *exportWriter) qualifiedIdent(obj types.Object) { + // Ensure any referenced declarations are written out too. + w.p.pushDecl(obj) + + w.string(obj.Name()) + w.pkg(obj.Pkg()) +} + +func (w *exportWriter) typ(t types.Type, pkg *types.Package) { + w.data.uint64(w.p.typOff(t, pkg)) +} + +func (p *iexporter) newWriter() *exportWriter { + return &exportWriter{p: p} +} + +func (w *exportWriter) flush() uint64 { + off := uint64(w.p.data0.Len()) + io.Copy(&w.p.data0, &w.data) + return off +} + +func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 { + off, ok := p.typIndex[t] + if !ok { + w := p.newWriter() + w.doTyp(t, pkg) + off = predeclReserved + w.flush() + p.typIndex[t] = off + } + return off +} + +func (w *exportWriter) startType(k itag) { + w.data.uint64(uint64(k)) +} + +func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { + switch t := t.(type) { + case *types.Named: + w.startType(definedType) + w.qualifiedIdent(t.Obj()) + + case *types.Pointer: + w.startType(pointerType) + w.typ(t.Elem(), pkg) + + case *types.Slice: + w.startType(sliceType) + w.typ(t.Elem(), pkg) + + case *types.Array: + w.startType(arrayType) + w.uint64(uint64(t.Len())) + w.typ(t.Elem(), pkg) + + case *types.Chan: + w.startType(chanType) + // 1 RecvOnly; 2 SendOnly; 3 SendRecv + var dir uint64 + switch t.Dir() { + case types.RecvOnly: + dir = 1 + case types.SendOnly: + dir = 2 + case types.SendRecv: + dir = 3 + } + w.uint64(dir) + w.typ(t.Elem(), pkg) + + case *types.Map: + w.startType(mapType) + w.typ(t.Key(), pkg) + w.typ(t.Elem(), pkg) + + 
case *types.Signature: + w.startType(signatureType) + w.setPkg(pkg, true) + w.signature(t) + + case *types.Struct: + w.startType(structType) + w.setPkg(pkg, true) + + n := t.NumFields() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + f := t.Field(i) + w.pos(f.Pos()) + w.string(f.Name()) + w.typ(f.Type(), pkg) + w.bool(f.Embedded()) + w.string(t.Tag(i)) // note (or tag) + } + + case *types.Interface: + w.startType(interfaceType) + w.setPkg(pkg, true) + + n := t.NumEmbeddeds() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + f := t.Embedded(i) + w.pos(f.Obj().Pos()) + w.typ(f.Obj().Type(), f.Obj().Pkg()) + } + + n = t.NumExplicitMethods() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + m := t.ExplicitMethod(i) + w.pos(m.Pos()) + w.string(m.Name()) + sig, _ := m.Type().(*types.Signature) + w.signature(sig) + } + + default: + panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t))) + } +} + +func (w *exportWriter) setPkg(pkg *types.Package, write bool) { + if write { + w.pkg(pkg) + } + + w.currPkg = pkg +} + +func (w *exportWriter) signature(sig *types.Signature) { + w.paramList(sig.Params()) + w.paramList(sig.Results()) + if sig.Params().Len() > 0 { + w.bool(sig.Variadic()) + } +} + +func (w *exportWriter) paramList(tup *types.Tuple) { + n := tup.Len() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + w.param(tup.At(i)) + } +} + +func (w *exportWriter) param(obj types.Object) { + w.pos(obj.Pos()) + w.localIdent(obj) + w.typ(obj.Type(), obj.Pkg()) +} + +func (w *exportWriter) value(typ types.Type, v constant.Value) { + w.typ(typ, nil) + + switch v.Kind() { + case constant.Bool: + w.bool(constant.BoolVal(v)) + case constant.Int: + var i big.Int + if i64, exact := constant.Int64Val(v); exact { + i.SetInt64(i64) + } else if ui64, exact := constant.Uint64Val(v); exact { + i.SetUint64(ui64) + } else { + i.SetString(v.ExactString(), 10) + } + w.mpint(&i, typ) + case constant.Float: + f := constantToFloat(v) + w.mpfloat(f, typ) + case constant.Complex: 
+ w.mpfloat(constantToFloat(constant.Real(v)), typ) + w.mpfloat(constantToFloat(constant.Imag(v)), typ) + case constant.String: + w.string(constant.StringVal(v)) + case constant.Unknown: + // package contains type errors + default: + panic(internalErrorf("unexpected value %v (%T)", v, v)) + } +} + +// constantToFloat converts a constant.Value with kind constant.Float to a +// big.Float. +func constantToFloat(x constant.Value) *big.Float { + assert(x.Kind() == constant.Float) + // Use the same floating-point precision (512) as cmd/compile + // (see Mpprec in cmd/compile/internal/gc/mpfloat.go). + const mpprec = 512 + var f big.Float + f.SetPrec(mpprec) + if v, exact := constant.Float64Val(x); exact { + // float64 + f.SetFloat64(v) + } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { + // TODO(gri): add big.Rat accessor to constant.Value. + n := valueToRat(num) + d := valueToRat(denom) + f.SetRat(n.Quo(n, d)) + } else { + // Value too large to represent as a fraction => inaccessible. + // TODO(gri): add big.Float accessor to constant.Value. + _, ok := f.SetString(x.ExactString()) + assert(ok) + } + return &f +} + +// mpint exports a multi-precision integer. +// +// For unsigned types, small values are written out as a single +// byte. Larger values are written out as a length-prefixed big-endian +// byte string, where the length prefix is encoded as its complement. +// For example, bytes 0, 1, and 2 directly represent the integer +// values 0, 1, and 2; while bytes 255, 254, and 253 indicate a 1-, +// 2-, and 3-byte big-endian string follow. +// +// Encoding for signed types use the same general approach as for +// unsigned types, except small values use zig-zag encoding and the +// bottom bit of length prefix byte for large values is reserved as a +// sign bit. +// +// The exact boundary between small and large encodings varies +// according to the maximum number of bytes needed to encode a value +// of type typ. 
As a special case, 8-bit types are always encoded as a +// single byte. +// +// TODO(mdempsky): Is this level of complexity really worthwhile? +func (w *exportWriter) mpint(x *big.Int, typ types.Type) { + basic, ok := typ.Underlying().(*types.Basic) + if !ok { + panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying())) + } + + signed, maxBytes := intSize(basic) + + negative := x.Sign() < 0 + if !signed && negative { + panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x)) + } + + b := x.Bytes() + if len(b) > 0 && b[0] == 0 { + panic(internalErrorf("leading zeros")) + } + if uint(len(b)) > maxBytes { + panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x)) + } + + maxSmall := 256 - maxBytes + if signed { + maxSmall = 256 - 2*maxBytes + } + if maxBytes == 1 { + maxSmall = 256 + } + + // Check if x can use small value encoding. + if len(b) <= 1 { + var ux uint + if len(b) == 1 { + ux = uint(b[0]) + } + if signed { + ux <<= 1 + if negative { + ux-- + } + } + if ux < maxSmall { + w.data.WriteByte(byte(ux)) + return + } + } + + n := 256 - uint(len(b)) + if signed { + n = 256 - 2*uint(len(b)) + if negative { + n |= 1 + } + } + if n < maxSmall || n >= 256 { + panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n)) + } + + w.data.WriteByte(byte(n)) + w.data.Write(b) +} + +// mpfloat exports a multi-precision floating point number. +// +// The number's value is decomposed into mantissa × 2**exponent, where +// mantissa is an integer. The value is written out as mantissa (as a +// multi-precision integer) and then the exponent, except exponent is +// omitted if mantissa is zero. +func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) { + if f.IsInf() { + panic("infinite constant") + } + + // Break into f = mant × 2**exp, with 0.5 <= mant < 1. + var mant big.Float + exp := int64(f.MantExp(&mant)) + + // Scale so that mant is an integer. 
+ prec := mant.MinPrec() + mant.SetMantExp(&mant, int(prec)) + exp -= int64(prec) + + manti, acc := mant.Int(nil) + if acc != big.Exact { + panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc)) + } + w.mpint(manti, typ) + if manti.Sign() != 0 { + w.int64(exp) + } +} + +func (w *exportWriter) bool(b bool) bool { + var x uint64 + if b { + x = 1 + } + w.uint64(x) + return b +} + +func (w *exportWriter) int64(x int64) { w.data.int64(x) } +func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) } +func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) } + +func (w *exportWriter) localIdent(obj types.Object) { + // Anonymous parameters. + if obj == nil { + w.string("") + return + } + + name := obj.Name() + if name == "_" { + w.string("_") + return + } + + w.string(name) +} + +type intWriter struct { + bytes.Buffer +} + +func (w *intWriter) int64(x int64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutVarint(buf[:], x) + w.Write(buf[:n]) +} + +func (w *intWriter) uint64(x uint64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutUvarint(buf[:], x) + w.Write(buf[:n]) +} + +func assert(cond bool) { + if !cond { + panic("internal error: assertion failed") + } +} + +// The below is copied from go/src/cmd/compile/internal/gc/syntax.go. + +// objQueue is a FIFO queue of types.Object. The zero value of objQueue is +// a ready-to-use empty queue. +type objQueue struct { + ring []types.Object + head, tail int +} + +// empty returns true if q contains no Nodes. +func (q *objQueue) empty() bool { + return q.head == q.tail +} + +// pushTail appends n to the tail of the queue. +func (q *objQueue) pushTail(obj types.Object) { + if len(q.ring) == 0 { + q.ring = make([]types.Object, 16) + } else if q.head+len(q.ring) == q.tail { + // Grow the ring. + nring := make([]types.Object, len(q.ring)*2) + // Copy the old elements. 
+ part := q.ring[q.head%len(q.ring):] + if q.tail-q.head <= len(part) { + part = part[:q.tail-q.head] + copy(nring, part) + } else { + pos := copy(nring, part) + copy(nring[pos:], q.ring[:q.tail%len(q.ring)]) + } + q.ring, q.head, q.tail = nring, 0, q.tail-q.head + } + + q.ring[q.tail%len(q.ring)] = obj + q.tail++ +} + +// popHead pops a node from the head of the queue. It panics if q is empty. +func (q *objQueue) popHead() types.Object { + if q.empty() { + panic("dequeue empty") + } + obj := q.ring[q.head%len(q.ring)] + q.head++ + return obj +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go new file mode 100644 index 0000000000..3cb7ae5b9e --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go @@ -0,0 +1,606 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Indexed package import. +// See cmd/compile/internal/gc/iexport.go for the export data format. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go. 
+ +package gcimporter + +import ( + "bytes" + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "go/types" + "io" + "sort" +) + +type intReader struct { + *bytes.Reader + path string +} + +func (r *intReader) int64() int64 { + i, err := binary.ReadVarint(r.Reader) + if err != nil { + errorf("import %q: read varint error: %v", r.path, err) + } + return i +} + +func (r *intReader) uint64() uint64 { + i, err := binary.ReadUvarint(r.Reader) + if err != nil { + errorf("import %q: read varint error: %v", r.path, err) + } + return i +} + +const predeclReserved = 32 + +type itag uint64 + +const ( + // Types + definedType itag = iota + pointerType + sliceType + arrayType + chanType + mapType + signatureType + structType + interfaceType +) + +// IImportData imports a package from the serialized package data +// and returns the number of bytes consumed and a reference to the package. +// If the export data version is not recognized or the format is otherwise +// compromised, an error is returned. 
+func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + const currentVersion = 0 + version := -1 + defer func() { + if e := recover(); e != nil { + if version > currentVersion { + err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) + } else { + err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) + } + } + }() + + r := &intReader{bytes.NewReader(data), path} + + version = int(r.uint64()) + switch version { + case currentVersion: + default: + errorf("unknown iexport format version %d", version) + } + + sLen := int64(r.uint64()) + dLen := int64(r.uint64()) + + whence, _ := r.Seek(0, io.SeekCurrent) + stringData := data[whence : whence+sLen] + declData := data[whence+sLen : whence+sLen+dLen] + r.Seek(sLen+dLen, io.SeekCurrent) + + p := iimporter{ + ipath: path, + + stringData: stringData, + stringCache: make(map[uint64]string), + pkgCache: make(map[uint64]*types.Package), + + declData: declData, + pkgIndex: make(map[*types.Package]map[string]uint64), + typCache: make(map[uint64]types.Type), + + fake: fakeFileSet{ + fset: fset, + files: make(map[string]*token.File), + }, + } + + for i, pt := range predeclared() { + p.typCache[uint64(i)] = pt + } + + pkgList := make([]*types.Package, r.uint64()) + for i := range pkgList { + pkgPathOff := r.uint64() + pkgPath := p.stringAt(pkgPathOff) + pkgName := p.stringAt(r.uint64()) + _ = r.uint64() // package height; unused by go/types + + if pkgPath == "" { + pkgPath = path + } + pkg := imports[pkgPath] + if pkg == nil { + pkg = types.NewPackage(pkgPath, pkgName) + imports[pkgPath] = pkg + } else if pkg.Name() != pkgName { + errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path) + } + + p.pkgCache[pkgPathOff] = pkg + + nameIndex := make(map[string]uint64) + for nSyms := r.uint64(); nSyms > 0; nSyms-- { + name := p.stringAt(r.uint64()) + 
nameIndex[name] = r.uint64() + } + + p.pkgIndex[pkg] = nameIndex + pkgList[i] = pkg + } + var localpkg *types.Package + for _, pkg := range pkgList { + if pkg.Path() == path { + localpkg = pkg + } + } + + names := make([]string, 0, len(p.pkgIndex[localpkg])) + for name := range p.pkgIndex[localpkg] { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + p.doDecl(localpkg, name) + } + + for _, typ := range p.interfaceList { + typ.Complete() + } + + // record all referenced packages as imports + list := append(([]*types.Package)(nil), pkgList[1:]...) + sort.Sort(byPath(list)) + localpkg.SetImports(list) + + // package was imported completely and without errors + localpkg.MarkComplete() + + consumed, _ := r.Seek(0, io.SeekCurrent) + return int(consumed), localpkg, nil +} + +type iimporter struct { + ipath string + + stringData []byte + stringCache map[uint64]string + pkgCache map[uint64]*types.Package + + declData []byte + pkgIndex map[*types.Package]map[string]uint64 + typCache map[uint64]types.Type + + fake fakeFileSet + interfaceList []*types.Interface +} + +func (p *iimporter) doDecl(pkg *types.Package, name string) { + // See if we've already imported this declaration. 
+ if obj := pkg.Scope().Lookup(name); obj != nil { + return + } + + off, ok := p.pkgIndex[pkg][name] + if !ok { + errorf("%v.%v not in index", pkg, name) + } + + r := &importReader{p: p, currPkg: pkg} + r.declReader.Reset(p.declData[off:]) + + r.obj(name) +} + +func (p *iimporter) stringAt(off uint64) string { + if s, ok := p.stringCache[off]; ok { + return s + } + + slen, n := binary.Uvarint(p.stringData[off:]) + if n <= 0 { + errorf("varint failed") + } + spos := off + uint64(n) + s := string(p.stringData[spos : spos+slen]) + p.stringCache[off] = s + return s +} + +func (p *iimporter) pkgAt(off uint64) *types.Package { + if pkg, ok := p.pkgCache[off]; ok { + return pkg + } + path := p.stringAt(off) + errorf("missing package %q in %q", path, p.ipath) + return nil +} + +func (p *iimporter) typAt(off uint64, base *types.Named) types.Type { + if t, ok := p.typCache[off]; ok && (base == nil || !isInterface(t)) { + return t + } + + if off < predeclReserved { + errorf("predeclared type missing from cache: %v", off) + } + + r := &importReader{p: p} + r.declReader.Reset(p.declData[off-predeclReserved:]) + t := r.doType(base) + + if base == nil || !isInterface(t) { + p.typCache[off] = t + } + return t +} + +type importReader struct { + p *iimporter + declReader bytes.Reader + currPkg *types.Package + prevFile string + prevLine int64 +} + +func (r *importReader) obj(name string) { + tag := r.byte() + pos := r.pos() + + switch tag { + case 'A': + typ := r.typ() + + r.declare(types.NewTypeName(pos, r.currPkg, name, typ)) + + case 'C': + typ, val := r.value() + + r.declare(types.NewConst(pos, r.currPkg, name, typ, val)) + + case 'F': + sig := r.signature(nil) + + r.declare(types.NewFunc(pos, r.currPkg, name, sig)) + + case 'T': + // Types can be recursive. We need to setup a stub + // declaration before recursing. 
+ obj := types.NewTypeName(pos, r.currPkg, name, nil) + named := types.NewNamed(obj, nil, nil) + r.declare(obj) + + underlying := r.p.typAt(r.uint64(), named).Underlying() + named.SetUnderlying(underlying) + + if !isInterface(underlying) { + for n := r.uint64(); n > 0; n-- { + mpos := r.pos() + mname := r.ident() + recv := r.param() + msig := r.signature(recv) + + named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig)) + } + } + + case 'V': + typ := r.typ() + + r.declare(types.NewVar(pos, r.currPkg, name, typ)) + + default: + errorf("unexpected tag: %v", tag) + } +} + +func (r *importReader) declare(obj types.Object) { + obj.Pkg().Scope().Insert(obj) +} + +func (r *importReader) value() (typ types.Type, val constant.Value) { + typ = r.typ() + + switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { + case types.IsBoolean: + val = constant.MakeBool(r.bool()) + + case types.IsString: + val = constant.MakeString(r.string()) + + case types.IsInteger: + val = r.mpint(b) + + case types.IsFloat: + val = r.mpfloat(b) + + case types.IsComplex: + re := r.mpfloat(b) + im := r.mpfloat(b) + val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + + default: + if b.Kind() == types.Invalid { + val = constant.MakeUnknown() + return + } + errorf("unexpected type %v", typ) // panics + panic("unreachable") + } + + return +} + +func intSize(b *types.Basic) (signed bool, maxBytes uint) { + if (b.Info() & types.IsUntyped) != 0 { + return true, 64 + } + + switch b.Kind() { + case types.Float32, types.Complex64: + return true, 3 + case types.Float64, types.Complex128: + return true, 7 + } + + signed = (b.Info() & types.IsUnsigned) == 0 + switch b.Kind() { + case types.Int8, types.Uint8: + maxBytes = 1 + case types.Int16, types.Uint16: + maxBytes = 2 + case types.Int32, types.Uint32: + maxBytes = 4 + default: + maxBytes = 8 + } + + return +} + +func (r *importReader) mpint(b *types.Basic) constant.Value { + signed, maxBytes := intSize(b) + + maxSmall := 256 
- maxBytes + if signed { + maxSmall = 256 - 2*maxBytes + } + if maxBytes == 1 { + maxSmall = 256 + } + + n, _ := r.declReader.ReadByte() + if uint(n) < maxSmall { + v := int64(n) + if signed { + v >>= 1 + if n&1 != 0 { + v = ^v + } + } + return constant.MakeInt64(v) + } + + v := -n + if signed { + v = -(n &^ 1) >> 1 + } + if v < 1 || uint(v) > maxBytes { + errorf("weird decoding: %v, %v => %v", n, signed, v) + } + + buf := make([]byte, v) + io.ReadFull(&r.declReader, buf) + + // convert to little endian + // TODO(gri) go/constant should have a more direct conversion function + // (e.g., once it supports a big.Float based implementation) + for i, j := 0, len(buf)-1; i < j; i, j = i+1, j-1 { + buf[i], buf[j] = buf[j], buf[i] + } + + x := constant.MakeFromBytes(buf) + if signed && n&1 != 0 { + x = constant.UnaryOp(token.SUB, x, 0) + } + return x +} + +func (r *importReader) mpfloat(b *types.Basic) constant.Value { + x := r.mpint(b) + if constant.Sign(x) == 0 { + return x + } + + exp := r.int64() + switch { + case exp > 0: + x = constant.Shift(x, token.SHL, uint(exp)) + case exp < 0: + d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) + x = constant.BinaryOp(x, token.QUO, d) + } + return x +} + +func (r *importReader) ident() string { + return r.string() +} + +func (r *importReader) qualifiedIdent() (*types.Package, string) { + name := r.string() + pkg := r.pkg() + return pkg, name +} + +func (r *importReader) pos() token.Pos { + delta := r.int64() + if delta != deltaNewFile { + r.prevLine += delta + } else if l := r.int64(); l == -1 { + r.prevLine += deltaNewFile + } else { + r.prevFile = r.string() + r.prevLine = l + } + + if r.prevFile == "" && r.prevLine == 0 { + return token.NoPos + } + + return r.p.fake.pos(r.prevFile, int(r.prevLine)) +} + +func (r *importReader) typ() types.Type { + return r.p.typAt(r.uint64(), nil) +} + +func isInterface(t types.Type) bool { + _, ok := t.(*types.Interface) + return ok +} + +func (r *importReader) pkg() 
*types.Package { return r.p.pkgAt(r.uint64()) } +func (r *importReader) string() string { return r.p.stringAt(r.uint64()) } + +func (r *importReader) doType(base *types.Named) types.Type { + switch k := r.kind(); k { + default: + errorf("unexpected kind tag in %q: %v", r.p.ipath, k) + return nil + + case definedType: + pkg, name := r.qualifiedIdent() + r.p.doDecl(pkg, name) + return pkg.Scope().Lookup(name).(*types.TypeName).Type() + case pointerType: + return types.NewPointer(r.typ()) + case sliceType: + return types.NewSlice(r.typ()) + case arrayType: + n := r.uint64() + return types.NewArray(r.typ(), int64(n)) + case chanType: + dir := chanDir(int(r.uint64())) + return types.NewChan(dir, r.typ()) + case mapType: + return types.NewMap(r.typ(), r.typ()) + case signatureType: + r.currPkg = r.pkg() + return r.signature(nil) + + case structType: + r.currPkg = r.pkg() + + fields := make([]*types.Var, r.uint64()) + tags := make([]string, len(fields)) + for i := range fields { + fpos := r.pos() + fname := r.ident() + ftyp := r.typ() + emb := r.bool() + tag := r.string() + + fields[i] = types.NewField(fpos, r.currPkg, fname, ftyp, emb) + tags[i] = tag + } + return types.NewStruct(fields, tags) + + case interfaceType: + r.currPkg = r.pkg() + + embeddeds := make([]types.Type, r.uint64()) + for i := range embeddeds { + _ = r.pos() + embeddeds[i] = r.typ() + } + + methods := make([]*types.Func, r.uint64()) + for i := range methods { + mpos := r.pos() + mname := r.ident() + + // TODO(mdempsky): Matches bimport.go, but I + // don't agree with this. 
+ var recv *types.Var + if base != nil { + recv = types.NewVar(token.NoPos, r.currPkg, "", base) + } + + msig := r.signature(recv) + methods[i] = types.NewFunc(mpos, r.currPkg, mname, msig) + } + + typ := newInterface(methods, embeddeds) + r.p.interfaceList = append(r.p.interfaceList, typ) + return typ + } +} + +func (r *importReader) kind() itag { + return itag(r.uint64()) +} + +func (r *importReader) signature(recv *types.Var) *types.Signature { + params := r.paramList() + results := r.paramList() + variadic := params.Len() > 0 && r.bool() + return types.NewSignature(recv, params, results, variadic) +} + +func (r *importReader) paramList() *types.Tuple { + xs := make([]*types.Var, r.uint64()) + for i := range xs { + xs[i] = r.param() + } + return types.NewTuple(xs...) +} + +func (r *importReader) param() *types.Var { + pos := r.pos() + name := r.ident() + typ := r.typ() + return types.NewParam(pos, r.currPkg, name, typ) +} + +func (r *importReader) bool() bool { + return r.uint64() != 0 +} + +func (r *importReader) int64() int64 { + n, err := binary.ReadVarint(&r.declReader) + if err != nil { + errorf("readVarint: %v", err) + } + return n +} + +func (r *importReader) uint64() uint64 { + n, err := binary.ReadUvarint(&r.declReader) + if err != nil { + errorf("readUvarint: %v", err) + } + return n +} + +func (r *importReader) byte() byte { + x, err := r.declReader.ReadByte() + if err != nil { + errorf("declReader.ReadByte: %v", err) + } + return x +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go new file mode 100644 index 0000000000..463f252271 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go @@ -0,0 +1,21 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !go1.11 + +package gcimporter + +import "go/types" + +func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { + named := make([]*types.Named, len(embeddeds)) + for i, e := range embeddeds { + var ok bool + named[i], ok = e.(*types.Named) + if !ok { + panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11") + } + } + return types.NewInterface(methods, named) +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go new file mode 100644 index 0000000000..ab28b95cbb --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go @@ -0,0 +1,13 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.11 + +package gcimporter + +import "go/types" + +func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { + return types.NewInterfaceType(methods, embeddeds) +} diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go new file mode 100644 index 0000000000..fdc7da0568 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go @@ -0,0 +1,160 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packagesdriver fetches type sizes for go/packages and go/analysis. +package packagesdriver + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "go/types" + "log" + "os" + "os/exec" + "strings" + "time" +) + +var debug = false + +// GetSizes returns the sizes used by the underlying driver with the given parameters. 
+func GetSizes(ctx context.Context, buildFlags, env []string, dir string, usesExportData bool) (types.Sizes, error) { + // TODO(matloob): Clean this up. This code is mostly a copy of packages.findExternalDriver. + const toolPrefix = "GOPACKAGESDRIVER=" + tool := "" + for _, env := range env { + if val := strings.TrimPrefix(env, toolPrefix); val != env { + tool = val + } + } + + if tool == "" { + var err error + tool, err = exec.LookPath("gopackagesdriver") + if err != nil { + // We did not find the driver, so use "go list". + tool = "off" + } + } + + if tool == "off" { + return GetSizesGolist(ctx, buildFlags, env, dir, usesExportData) + } + + req, err := json.Marshal(struct { + Command string `json:"command"` + Env []string `json:"env"` + BuildFlags []string `json:"build_flags"` + }{ + Command: "sizes", + Env: env, + BuildFlags: buildFlags, + }) + if err != nil { + return nil, fmt.Errorf("failed to encode message to driver tool: %v", err) + } + + buf := new(bytes.Buffer) + cmd := exec.CommandContext(ctx, tool) + cmd.Dir = dir + cmd.Env = env + cmd.Stdin = bytes.NewReader(req) + cmd.Stdout = buf + cmd.Stderr = new(bytes.Buffer) + if err := cmd.Run(); err != nil { + return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr) + } + var response struct { + // Sizes, if not nil, is the types.Sizes to use when type checking. + Sizes *types.StdSizes + } + if err := json.Unmarshal(buf.Bytes(), &response); err != nil { + return nil, err + } + return response.Sizes, nil +} + +func GetSizesGolist(ctx context.Context, buildFlags, env []string, dir string, usesExportData bool) (types.Sizes, error) { + args := []string{"list", "-f", "{{context.GOARCH}} {{context.Compiler}}"} + args = append(args, buildFlags...) + args = append(args, "--", "unsafe") + stdout, err := InvokeGo(ctx, env, dir, usesExportData, args...) 
+ if err != nil { + return nil, err + } + fields := strings.Fields(stdout.String()) + if len(fields) < 2 { + return nil, fmt.Errorf("could not determine GOARCH and Go compiler") + } + goarch := fields[0] + compiler := fields[1] + return types.SizesFor(compiler, goarch), nil +} + +// InvokeGo returns the stdout of a go command invocation. +func InvokeGo(ctx context.Context, env []string, dir string, usesExportData bool, args ...string) (*bytes.Buffer, error) { + if debug { + defer func(start time.Time) { log.Printf("%s for %v", time.Since(start), cmdDebugStr(env, args...)) }(time.Now()) + } + stdout := new(bytes.Buffer) + stderr := new(bytes.Buffer) + cmd := exec.CommandContext(ctx, "go", args...) + // On darwin the cwd gets resolved to the real path, which breaks anything that + // expects the working directory to keep the original path, including the + // go command when dealing with modules. + // The Go stdlib has a special feature where if the cwd and the PWD are the + // same node then it trusts the PWD, so by setting it in the env for the child + // process we fix up all the paths returned by the go command. + cmd.Env = append(append([]string{}, env...), "PWD="+dir) + cmd.Dir = dir + cmd.Stdout = stdout + cmd.Stderr = stderr + if err := cmd.Run(); err != nil { + exitErr, ok := err.(*exec.ExitError) + if !ok { + // Catastrophic error: + // - executable not found + // - context cancellation + return nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err) + } + + // Export mode entails a build. + // If that build fails, errors appear on stderr + // (despite the -e flag) and the Export field is blank. + // Do not fail in that case. + if !usesExportData { + return nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr) + } + } + + // As of writing, go list -export prints some non-fatal compilation + // errors to stderr, even with -e set. We would prefer that it put + // them in the Package.Error JSON (see https://golang.org/issue/26319). 
+ // In the meantime, there's nowhere good to put them, but they can + // be useful for debugging. Print them if $GOPACKAGESPRINTGOLISTERRORS + // is set. + if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" { + fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(env, args...), stderr) + } + + // debugging + if false { + fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(env, args...), stdout) + } + + return stdout, nil +} + +func cmdDebugStr(envlist []string, args ...string) string { + env := make(map[string]string) + for _, kv := range envlist { + split := strings.Split(kv, "=") + k, v := split[0], split[1] + env[k] = v + } + + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["PWD"], args) +} diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go new file mode 100644 index 0000000000..3799f8ed8b --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -0,0 +1,222 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package packages loads Go packages for inspection and analysis. + +The Load function takes as input a list of patterns and return a list of Package +structs describing individual packages matched by those patterns. +The LoadMode controls the amount of detail in the loaded packages. + +Load passes most patterns directly to the underlying build tool, +but all patterns with the prefix "query=", where query is a +non-empty string of letters from [a-z], are reserved and may be +interpreted as query operators. + +Two query operators are currently supported: "file" and "pattern". + +The query "file=path/to/file.go" matches the package or packages enclosing +the Go source file path/to/file.go. 
For example "file=~/go/src/fmt/print.go" +might return the packages "fmt" and "fmt [fmt.test]". + +The query "pattern=string" causes "string" to be passed directly to +the underlying build tool. In most cases this is unnecessary, +but an application can use Load("pattern=" + x) as an escaping mechanism +to ensure that x is not interpreted as a query operator if it contains '='. + +All other query operators are reserved for future use and currently +cause Load to report an error. + +The Package struct provides basic information about the package, including + + - ID, a unique identifier for the package in the returned set; + - GoFiles, the names of the package's Go source files; + - Imports, a map from source import strings to the Packages they name; + - Types, the type information for the package's exported symbols; + - Syntax, the parsed syntax trees for the package's source code; and + - TypeInfo, the result of a complete type-check of the package syntax trees. + +(See the documentation for type Package for the complete list of fields +and more detailed descriptions.) + +For example, + + Load(nil, "bytes", "unicode...") + +returns four Package structs describing the standard library packages +bytes, unicode, unicode/utf16, and unicode/utf8. Note that one pattern +can match multiple packages and that a package might be matched by +multiple patterns: in general it is not possible to determine which +packages correspond to which patterns. + +Note that the list returned by Load contains only the packages matched +by the patterns. Their dependencies can be found by walking the import +graph using the Imports fields. + +The Load function can be configured by passing a pointer to a Config as +the first argument. A nil Config is equivalent to the zero Config, which +causes Load to run in LoadFiles mode, collecting minimal information. +See the documentation for type Config for details. 
+ +As noted earlier, the Config.Mode controls the amount of detail +reported about the loaded packages, with each mode returning all the data of the +previous mode with some extra added. See the documentation for type LoadMode +for details. + +Most tools should pass their command-line arguments (after any flags) +uninterpreted to the loader, so that the loader can interpret them +according to the conventions of the underlying build system. +See the Example function for typical usage. + +*/ +package packages // import "golang.org/x/tools/go/packages" + +/* + +Motivation and design considerations + +The new package's design solves problems addressed by two existing +packages: go/build, which locates and describes packages, and +golang.org/x/tools/go/loader, which loads, parses and type-checks them. +The go/build.Package structure encodes too much of the 'go build' way +of organizing projects, leaving us in need of a data type that describes a +package of Go source code independent of the underlying build system. +We wanted something that works equally well with go build and vgo, and +also other build systems such as Bazel and Blaze, making it possible to +construct analysis tools that work in all these environments. +Tools such as errcheck and staticcheck were essentially unavailable to +the Go community at Google, and some of Google's internal tools for Go +are unavailable externally. +This new package provides a uniform way to obtain package metadata by +querying each of these build systems, optionally supporting their +preferred command-line notations for packages, so that tools integrate +neatly with users' build environments. The Metadata query function +executes an external query tool appropriate to the current workspace. 
+ +Loading packages always returns the complete import graph "all the way down", +even if all you want is information about a single package, because the query +mechanisms of all the build systems we currently support ({go,vgo} list, and +blaze/bazel aspect-based query) cannot provide detailed information +about one package without visiting all its dependencies too, so there is +no additional asymptotic cost to providing transitive information. +(This property might not be true of a hypothetical 5th build system.) + +In calls to TypeCheck, all initial packages, and any package that +transitively depends on one of them, must be loaded from source. +Consider A->B->C->D->E: if A,C are initial, A,B,C must be loaded from +source; D may be loaded from export data, and E may not be loaded at all +(though it's possible that D's export data mentions it, so a +types.Package may be created for it and exposed.) + +The old loader had a feature to suppress type-checking of function +bodies on a per-package basis, primarily intended to reduce the work of +obtaining type information for imported packages. Now that imports are +satisfied by export data, the optimization no longer seems necessary. + +Despite some early attempts, the old loader did not exploit export data, +instead always using the equivalent of WholeProgram mode. This was due +to the complexity of mixing source and export data packages (now +resolved by the upward traversal mentioned above), and because export data +files were nearly always missing or stale. Now that 'go build' supports +caching, all the underlying build systems can guarantee to produce +export data in a reasonable (amortized) time. + +Test "main" packages synthesized by the build system are now reported as +first-class packages, avoiding the need for clients (such as go/ssa) to +reinvent this generation logic. + +One way in which go/packages is simpler than the old loader is in its +treatment of in-package tests. 
In-package tests are packages that +consist of all the files of the library under test, plus the test files. +The old loader constructed in-package tests by a two-phase process of +mutation called "augmentation": first it would construct and type check +all the ordinary library packages and type-check the packages that +depend on them; then it would add more (test) files to the package and +type-check again. This two-phase approach had four major problems: +1) in processing the tests, the loader modified the library package, + leaving no way for a client application to see both the test + package and the library package; one would mutate into the other. +2) because test files can declare additional methods on types defined in + the library portion of the package, the dispatch of method calls in + the library portion was affected by the presence of the test files. + This should have been a clue that the packages were logically + different. +3) this model of "augmentation" assumed at most one in-package test + per library package, which is true of projects using 'go build', + but not other build systems. +4) because of the two-phase nature of test processing, all packages that + import the library package had to be processed before augmentation, + forcing a "one-shot" API and preventing the client from calling Load + in several times in sequence as is now possible in WholeProgram mode. + (TypeCheck mode has a similar one-shot restriction for a different reason.) + +Early drafts of this package supported "multi-shot" operation. +Although it allowed clients to make a sequence of calls (or concurrent +calls) to Load, building up the graph of Packages incrementally, +it was of marginal value: it complicated the API +(since it allowed some options to vary across calls but not others), +it complicated the implementation, +it cannot be made to work in Types mode, as explained above, +and it was less efficient than making one combined call (when this is possible). 
+
Among the clients we have inspected, none made multiple calls to load
+but could not be easily and satisfactorily modified to make only a single call.
+However, application changes may be required.
+For example, the ssadump command loads the user-specified packages
+and in addition the runtime package. It is tempting to simply append
+"runtime" to the user-provided list, but that does not work if the user
+specified an ad-hoc package such as [a.go b.go].
+Instead, ssadump no longer requests the runtime package,
+but seeks it among the dependencies of the user-specified packages,
+and emits an error if it is not found.
+
+Overlays: The Overlay field in the Config allows providing alternate contents
+for Go source files, by providing a mapping from file path to contents.
+go/packages will pull in new imports added in overlay files when go/packages
+is run in LoadImports mode or greater.
+Overlay support for the go list driver isn't complete yet: if the file doesn't
+exist on disk, it will only be recognized in an overlay if it is a non-test file
+and the package would be reported even without the overlay.
+
+Questions & Tasks
+
+- Add GOARCH/GOOS?
+  They are not portable concepts, but could be made portable.
+  Our goal has been to allow users to express themselves using the conventions
+  of the underlying build system: if the build system honors GOARCH
+  during a build and during a metadata query, then so should
+  applications built atop that query mechanism.
+  Conversely, if the target architecture of the build is determined by
+  command-line flags, the application can pass the relevant
+  flags through to the build system using a command such as:
+  myapp -query_flag="--cpu=amd64" -query_flag="--os=darwin"
+  However, this approach is low-level, unwieldy, and non-portable.
+  GOOS and GOARCH seem important enough to warrant a dedicated option. 
+ +- How should we handle partial failures such as a mixture of good and + malformed patterns, existing and non-existent packages, successful and + failed builds, import failures, import cycles, and so on, in a call to + Load? + +- Support bazel, blaze, and go1.10 list, not just go1.11 list. + +- Handle (and test) various partial success cases, e.g. + a mixture of good packages and: + invalid patterns + nonexistent packages + empty packages + packages with malformed package or import declarations + unreadable files + import cycles + other parse errors + type errors + Make sure we record errors at the correct place in the graph. + +- Missing packages among initial arguments are not reported. + Return bogus packages for them, like golist does. + +- "undeclared name" errors (for example) are reported out of source file + order. I suspect this is due to the breadth-first resolution now used + by go/types. Is that a bug? Discuss with gri. + +*/ diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go new file mode 100644 index 0000000000..22ff769ef2 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -0,0 +1,79 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file enables an external tool to intercept package requests. +// If the tool is present then its results are used in preference to +// the go list command. 
+
+package packages
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "os/exec"
+ "strings"
+)
+
+// driverRequest is the JSON-encoded request passed to an external driver tool.
+type driverRequest struct {
+ Command string `json:"command"`
+ Mode LoadMode `json:"mode"`
+ Env []string `json:"env"`
+ BuildFlags []string `json:"build_flags"`
+ Tests bool `json:"tests"`
+ Overlay map[string][]byte `json:"overlay"`
+}
+
+// findExternalDriver returns the file path of a tool that supplies
+// the build system package structure, or "" if not found.
+// If GOPACKAGESDRIVER is set in the environment findExternalDriver returns its
+// value, otherwise it searches for a binary named gopackagesdriver on the PATH.
+func findExternalDriver(cfg *Config) driver {
+ const toolPrefix = "GOPACKAGESDRIVER="
+ tool := ""
+ for _, env := range cfg.Env {
+ if val := strings.TrimPrefix(env, toolPrefix); val != env {
+ tool = val
+ }
+ }
+ if tool != "" && tool == "off" {
+ return nil
+ }
+ if tool == "" {
+ var err error
+ tool, err = exec.LookPath("gopackagesdriver")
+ if err != nil {
+ return nil
+ }
+ }
+ return func(cfg *Config, words ...string) (*driverResponse, error) {
+ req, err := json.Marshal(driverRequest{
+ Mode: cfg.Mode,
+ Env: cfg.Env,
+ BuildFlags: cfg.BuildFlags,
+ Tests: cfg.Tests,
+ Overlay: cfg.Overlay,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("failed to encode message to driver tool: %v", err)
+ }
+
+ buf := new(bytes.Buffer)
+ cmd := exec.CommandContext(cfg.Context, tool, words...) 
+ cmd.Dir = cfg.Dir + cmd.Env = cfg.Env + cmd.Stdin = bytes.NewReader(req) + cmd.Stdout = buf + cmd.Stderr = new(bytes.Buffer) + if err := cmd.Run(); err != nil { + return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr) + } + var response driverResponse + if err := json.Unmarshal(buf.Bytes(), &response); err != nil { + return nil, err + } + return &response, nil + } +} diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go new file mode 100644 index 0000000000..72c0c5d632 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -0,0 +1,860 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "bytes" + "encoding/json" + "fmt" + "go/types" + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + "reflect" + "regexp" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/tools/go/internal/packagesdriver" + "golang.org/x/tools/internal/gopathwalk" + "golang.org/x/tools/internal/semver" +) + +// debug controls verbose logging. +var debug, _ = strconv.ParseBool(os.Getenv("GOPACKAGESDEBUG")) + +// A goTooOldError reports that the go command +// found by exec.LookPath is too old to use the new go list behavior. +type goTooOldError struct { + error +} + +// responseDeduper wraps a driverResponse, deduplicating its contents. +type responseDeduper struct { + seenRoots map[string]bool + seenPackages map[string]*Package + dr *driverResponse +} + +// init fills in r with a driverResponse. 
+func (r *responseDeduper) init(dr *driverResponse) { + r.dr = dr + r.seenRoots = map[string]bool{} + r.seenPackages = map[string]*Package{} + for _, pkg := range dr.Packages { + r.seenPackages[pkg.ID] = pkg + } + for _, root := range dr.Roots { + r.seenRoots[root] = true + } +} + +func (r *responseDeduper) addPackage(p *Package) { + if r.seenPackages[p.ID] != nil { + return + } + r.seenPackages[p.ID] = p + r.dr.Packages = append(r.dr.Packages, p) +} + +func (r *responseDeduper) addRoot(id string) { + if r.seenRoots[id] { + return + } + r.seenRoots[id] = true + r.dr.Roots = append(r.dr.Roots, id) +} + +// goListDriver uses the go list command to interpret the patterns and produce +// the build system package structure. +// See driver for more details. +func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { + var sizes types.Sizes + var sizeserr error + var sizeswg sync.WaitGroup + if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { + sizeswg.Add(1) + go func() { + sizes, sizeserr = getSizes(cfg) + sizeswg.Done() + }() + } + + // Determine files requested in contains patterns + var containFiles []string + var packagesNamed []string + restPatterns := make([]string, 0, len(patterns)) + // Extract file= and other [querytype]= patterns. Report an error if querytype + // doesn't exist. 
+extractQueries: + for _, pattern := range patterns { + eqidx := strings.Index(pattern, "=") + if eqidx < 0 { + restPatterns = append(restPatterns, pattern) + } else { + query, value := pattern[:eqidx], pattern[eqidx+len("="):] + switch query { + case "file": + containFiles = append(containFiles, value) + case "pattern": + restPatterns = append(restPatterns, value) + case "iamashamedtousethedisabledqueryname": + packagesNamed = append(packagesNamed, value) + case "": // not a reserved query + restPatterns = append(restPatterns, pattern) + default: + for _, rune := range query { + if rune < 'a' || rune > 'z' { // not a reserved query + restPatterns = append(restPatterns, pattern) + continue extractQueries + } + } + // Reject all other patterns containing "=" + return nil, fmt.Errorf("invalid query type %q in query pattern %q", query, pattern) + } + } + } + + response := &responseDeduper{} + var err error + + // See if we have any patterns to pass through to go list. Zero initial + // patterns also requires a go list call, since it's the equivalent of + // ".". + if len(restPatterns) > 0 || len(patterns) == 0 { + dr, err := golistDriver(cfg, restPatterns...) + if err != nil { + return nil, err + } + response.init(dr) + } else { + response.init(&driverResponse{}) + } + + sizeswg.Wait() + if sizeserr != nil { + return nil, sizeserr + } + // types.SizesFor always returns nil or a *types.StdSizes + response.dr.Sizes, _ = sizes.(*types.StdSizes) + + var containsCandidates []string + + if len(containFiles) != 0 { + if err := runContainsQueries(cfg, golistDriver, response, containFiles); err != nil { + return nil, err + } + } + + if len(packagesNamed) != 0 { + if err := runNamedQueries(cfg, golistDriver, response, packagesNamed); err != nil { + return nil, err + } + } + + modifiedPkgs, needPkgs, err := processGolistOverlay(cfg, response.dr) + if err != nil { + return nil, err + } + if len(containFiles) > 0 { + containsCandidates = append(containsCandidates, modifiedPkgs...) 
+ containsCandidates = append(containsCandidates, needPkgs...) + } + if err := addNeededOverlayPackages(cfg, golistDriver, response, needPkgs); err != nil { + return nil, err + } + // Check candidate packages for containFiles. + if len(containFiles) > 0 { + for _, id := range containsCandidates { + pkg, ok := response.seenPackages[id] + if !ok { + response.addPackage(&Package{ + ID: id, + Errors: []Error{ + { + Kind: ListError, + Msg: fmt.Sprintf("package %s expected but not seen", id), + }, + }, + }) + continue + } + for _, f := range containFiles { + for _, g := range pkg.GoFiles { + if sameFile(f, g) { + response.addRoot(id) + } + } + } + } + } + + return response.dr, nil +} + +func addNeededOverlayPackages(cfg *Config, driver driver, response *responseDeduper, pkgs []string) error { + if len(pkgs) == 0 { + return nil + } + dr, err := driver(cfg, pkgs...) + if err != nil { + return err + } + for _, pkg := range dr.Packages { + response.addPackage(pkg) + } + _, needPkgs, err := processGolistOverlay(cfg, response.dr) + if err != nil { + return err + } + addNeededOverlayPackages(cfg, driver, response, needPkgs) + return nil +} + +func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, queries []string) error { + for _, query := range queries { + // TODO(matloob): Do only one query per directory. + fdir := filepath.Dir(query) + // Pass absolute path of directory to go list so that it knows to treat it as a directory, + // not a package path. 
+ pattern, err := filepath.Abs(fdir) + if err != nil { + return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err) + } + dirResponse, err := driver(cfg, pattern) + if err != nil { + return err + } + isRoot := make(map[string]bool, len(dirResponse.Roots)) + for _, root := range dirResponse.Roots { + isRoot[root] = true + } + for _, pkg := range dirResponse.Packages { + // Add any new packages to the main set + // We don't bother to filter packages that will be dropped by the changes of roots, + // that will happen anyway during graph construction outside this function. + // Over-reporting packages is not a problem. + response.addPackage(pkg) + // if the package was not a root one, it cannot have the file + if !isRoot[pkg.ID] { + continue + } + for _, pkgFile := range pkg.GoFiles { + if filepath.Base(query) == filepath.Base(pkgFile) { + response.addRoot(pkg.ID) + break + } + } + } + } + return nil +} + +// modCacheRegexp splits a path in a module cache into module, module version, and package. +var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`) + +func runNamedQueries(cfg *Config, driver driver, response *responseDeduper, queries []string) error { + // calling `go env` isn't free; bail out if there's nothing to do. + if len(queries) == 0 { + return nil + } + // Determine which directories are relevant to scan. + roots, modRoot, err := roots(cfg) + if err != nil { + return err + } + + // Scan the selected directories. Simple matches, from GOPATH/GOROOT + // or the local module, can simply be "go list"ed. Matches from the + // module cache need special treatment. + var matchesMu sync.Mutex + var simpleMatches, modCacheMatches []string + add := func(root gopathwalk.Root, dir string) { + // Walk calls this concurrently; protect the result slices. 
+ matchesMu.Lock() + defer matchesMu.Unlock() + + path := dir + if dir != root.Path { + path = dir[len(root.Path)+1:] + } + if pathMatchesQueries(path, queries) { + switch root.Type { + case gopathwalk.RootModuleCache: + modCacheMatches = append(modCacheMatches, path) + case gopathwalk.RootCurrentModule: + // We'd need to read go.mod to find the full + // import path. Relative's easier. + rel, err := filepath.Rel(cfg.Dir, dir) + if err != nil { + // This ought to be impossible, since + // we found dir in the current module. + panic(err) + } + simpleMatches = append(simpleMatches, "./"+rel) + case gopathwalk.RootGOPATH, gopathwalk.RootGOROOT: + simpleMatches = append(simpleMatches, path) + } + } + } + + startWalk := time.Now() + gopathwalk.Walk(roots, add, gopathwalk.Options{ModulesEnabled: modRoot != "", Debug: debug}) + if debug { + log.Printf("%v for walk", time.Since(startWalk)) + } + + // Weird special case: the top-level package in a module will be in + // whatever directory the user checked the repository out into. It's + // more reasonable for that to not match the package name. So, if there + // are any Go files in the mod root, query it just to be safe. + if modRoot != "" { + rel, err := filepath.Rel(cfg.Dir, modRoot) + if err != nil { + panic(err) // See above. + } + + files, err := ioutil.ReadDir(modRoot) + for _, f := range files { + if strings.HasSuffix(f.Name(), ".go") { + simpleMatches = append(simpleMatches, rel) + break + } + } + } + + addResponse := func(r *driverResponse) { + for _, pkg := range r.Packages { + response.addPackage(pkg) + for _, name := range queries { + if pkg.Name == name { + response.addRoot(pkg.ID) + break + } + } + } + } + + if len(simpleMatches) != 0 { + resp, err := driver(cfg, simpleMatches...) + if err != nil { + return err + } + addResponse(resp) + } + + // Module cache matches are tricky. We want to avoid downloading new + // versions of things, so we need to use the ones present in the cache. 
+ // go list doesn't accept version specifiers, so we have to write out a + // temporary module, and do the list in that module. + if len(modCacheMatches) != 0 { + // Collect all the matches, deduplicating by major version + // and preferring the newest. + type modInfo struct { + mod string + major string + } + mods := make(map[modInfo]string) + var imports []string + for _, modPath := range modCacheMatches { + matches := modCacheRegexp.FindStringSubmatch(modPath) + mod, ver := filepath.ToSlash(matches[1]), matches[2] + importPath := filepath.ToSlash(filepath.Join(matches[1], matches[3])) + + major := semver.Major(ver) + if prevVer, ok := mods[modInfo{mod, major}]; !ok || semver.Compare(ver, prevVer) > 0 { + mods[modInfo{mod, major}] = ver + } + + imports = append(imports, importPath) + } + + // Build the temporary module. + var gomod bytes.Buffer + gomod.WriteString("module modquery\nrequire (\n") + for mod, version := range mods { + gomod.WriteString("\t" + mod.mod + " " + version + "\n") + } + gomod.WriteString(")\n") + + tmpCfg := *cfg + + // We're only trying to look at stuff in the module cache, so + // disable the network. This should speed things up, and has + // prevented errors in at least one case, #28518. + tmpCfg.Env = append(append([]string{"GOPROXY=off"}, cfg.Env...)) + + var err error + tmpCfg.Dir, err = ioutil.TempDir("", "gopackages-modquery") + if err != nil { + return err + } + defer os.RemoveAll(tmpCfg.Dir) + + if err := ioutil.WriteFile(filepath.Join(tmpCfg.Dir, "go.mod"), gomod.Bytes(), 0777); err != nil { + return fmt.Errorf("writing go.mod for module cache query: %v", err) + } + + // Run the query, using the import paths calculated from the matches above. + resp, err := driver(&tmpCfg, imports...) 
+ if err != nil { + return fmt.Errorf("querying module cache matches: %v", err) + } + addResponse(resp) + } + + return nil +} + +func getSizes(cfg *Config) (types.Sizes, error) { + return packagesdriver.GetSizesGolist(cfg.Context, cfg.BuildFlags, cfg.Env, cfg.Dir, usesExportData(cfg)) +} + +// roots selects the appropriate paths to walk based on the passed-in configuration, +// particularly the environment and the presence of a go.mod in cfg.Dir's parents. +func roots(cfg *Config) ([]gopathwalk.Root, string, error) { + stdout, err := invokeGo(cfg, "env", "GOROOT", "GOPATH", "GOMOD") + if err != nil { + return nil, "", err + } + + fields := strings.Split(stdout.String(), "\n") + if len(fields) != 4 || len(fields[3]) != 0 { + return nil, "", fmt.Errorf("go env returned unexpected output: %q", stdout.String()) + } + goroot, gopath, gomod := fields[0], filepath.SplitList(fields[1]), fields[2] + var modDir string + if gomod != "" { + modDir = filepath.Dir(gomod) + } + + var roots []gopathwalk.Root + // Always add GOROOT. + roots = append(roots, gopathwalk.Root{filepath.Join(goroot, "/src"), gopathwalk.RootGOROOT}) + // If modules are enabled, scan the module dir. + if modDir != "" { + roots = append(roots, gopathwalk.Root{modDir, gopathwalk.RootCurrentModule}) + } + // Add either GOPATH/src or GOPATH/pkg/mod, depending on module mode. + for _, p := range gopath { + if modDir != "" { + roots = append(roots, gopathwalk.Root{filepath.Join(p, "/pkg/mod"), gopathwalk.RootModuleCache}) + } else { + roots = append(roots, gopathwalk.Root{filepath.Join(p, "/src"), gopathwalk.RootGOPATH}) + } + } + + return roots, modDir, nil +} + +// These functions were copied from goimports. See further documentation there. + +// pathMatchesQueries is adapted from pkgIsCandidate. +// TODO: is it reasonable to do Contains here, rather than an exact match on a path component? 
+func pathMatchesQueries(path string, queries []string) bool { + lastTwo := lastTwoComponents(path) + for _, query := range queries { + if strings.Contains(lastTwo, query) { + return true + } + if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(query) { + lastTwo = lowerASCIIAndRemoveHyphen(lastTwo) + if strings.Contains(lastTwo, query) { + return true + } + } + } + return false +} + +// lastTwoComponents returns at most the last two path components +// of v, using either / or \ as the path separator. +func lastTwoComponents(v string) string { + nslash := 0 + for i := len(v) - 1; i >= 0; i-- { + if v[i] == '/' || v[i] == '\\' { + nslash++ + if nslash == 2 { + return v[i:] + } + } + } + return v +} + +func hasHyphenOrUpperASCII(s string) bool { + for i := 0; i < len(s); i++ { + b := s[i] + if b == '-' || ('A' <= b && b <= 'Z') { + return true + } + } + return false +} + +func lowerASCIIAndRemoveHyphen(s string) (ret string) { + buf := make([]byte, 0, len(s)) + for i := 0; i < len(s); i++ { + b := s[i] + switch { + case b == '-': + continue + case 'A' <= b && b <= 'Z': + buf = append(buf, b+('a'-'A')) + default: + buf = append(buf, b) + } + } + return string(buf) +} + +// Fields must match go list; +// see $GOROOT/src/cmd/go/internal/load/pkg.go. 
+type jsonPackage struct { + ImportPath string + Dir string + Name string + Export string + GoFiles []string + CompiledGoFiles []string + CFiles []string + CgoFiles []string + CXXFiles []string + MFiles []string + HFiles []string + FFiles []string + SFiles []string + SwigFiles []string + SwigCXXFiles []string + SysoFiles []string + Imports []string + ImportMap map[string]string + Deps []string + TestGoFiles []string + TestImports []string + XTestGoFiles []string + XTestImports []string + ForTest string // q in a "p [q.test]" package, else "" + DepOnly bool + + Error *jsonPackageError +} + +type jsonPackageError struct { + ImportStack []string + Pos string + Err string +} + +func otherFiles(p *jsonPackage) [][]string { + return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles} +} + +// golistDriver uses the "go list" command to expand the pattern +// words and return metadata for the specified packages. dir may be +// "" and env may be nil, as per os/exec.Command. +func golistDriver(cfg *Config, words ...string) (*driverResponse, error) { + // go list uses the following identifiers in ImportPath and Imports: + // + // "p" -- importable package or main (command) + // "q.test" -- q's test executable + // "p [q.test]" -- variant of p as built for q's test executable + // "q_test [q.test]" -- q's external test package + // + // The packages p that are built differently for a test q.test + // are q itself, plus any helpers used by the external test q_test, + // typically including "testing" and all its dependencies. + + // Run "go list" for complete + // information on the specified packages. + buf, err := invokeGo(cfg, golistargs(cfg, words)...) + if err != nil { + return nil, err + } + seen := make(map[string]*jsonPackage) + // Decode the JSON and convert it to Package form. 
+ var response driverResponse + for dec := json.NewDecoder(buf); dec.More(); { + p := new(jsonPackage) + if err := dec.Decode(p); err != nil { + return nil, fmt.Errorf("JSON decoding failed: %v", err) + } + + if p.ImportPath == "" { + // The documentation for go list says that “[e]rroneous packages will have + // a non-empty ImportPath”. If for some reason it comes back empty, we + // prefer to error out rather than silently discarding data or handing + // back a package without any way to refer to it. + if p.Error != nil { + return nil, Error{ + Pos: p.Error.Pos, + Msg: p.Error.Err, + } + } + return nil, fmt.Errorf("package missing import path: %+v", p) + } + + if old, found := seen[p.ImportPath]; found { + if !reflect.DeepEqual(p, old) { + return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath) + } + // skip the duplicate + continue + } + seen[p.ImportPath] = p + + pkg := &Package{ + Name: p.Name, + ID: p.ImportPath, + GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), + CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), + OtherFiles: absJoin(p.Dir, otherFiles(p)...), + } + + // Work around https://golang.org/issue/28749: + // cmd/go puts assembly, C, and C++ files in CompiledGoFiles. + // Filter out any elements of CompiledGoFiles that are also in OtherFiles. + // We have to keep this workaround in place until go1.12 is a distant memory. + if len(pkg.OtherFiles) > 0 { + other := make(map[string]bool, len(pkg.OtherFiles)) + for _, f := range pkg.OtherFiles { + other[f] = true + } + + out := pkg.CompiledGoFiles[:0] + for _, f := range pkg.CompiledGoFiles { + if other[f] { + continue + } + out = append(out, f) + } + pkg.CompiledGoFiles = out + } + + // Extract the PkgPath from the package's ID. 
+ if i := strings.IndexByte(pkg.ID, ' '); i >= 0 { + pkg.PkgPath = pkg.ID[:i] + } else { + pkg.PkgPath = pkg.ID + } + + if pkg.PkgPath == "unsafe" { + pkg.GoFiles = nil // ignore fake unsafe.go file + } + + // Assume go list emits only absolute paths for Dir. + if p.Dir != "" && !filepath.IsAbs(p.Dir) { + log.Fatalf("internal error: go list returned non-absolute Package.Dir: %s", p.Dir) + } + + if p.Export != "" && !filepath.IsAbs(p.Export) { + pkg.ExportFile = filepath.Join(p.Dir, p.Export) + } else { + pkg.ExportFile = p.Export + } + + // imports + // + // Imports contains the IDs of all imported packages. + // ImportsMap records (path, ID) only where they differ. + ids := make(map[string]bool) + for _, id := range p.Imports { + ids[id] = true + } + pkg.Imports = make(map[string]*Package) + for path, id := range p.ImportMap { + pkg.Imports[path] = &Package{ID: id} // non-identity import + delete(ids, id) + } + for id := range ids { + if id == "C" { + continue + } + + pkg.Imports[id] = &Package{ID: id} // identity import + } + if !p.DepOnly { + response.Roots = append(response.Roots, pkg.ID) + } + + // Work around for pre-go.1.11 versions of go list. + // TODO(matloob): they should be handled by the fallback. + // Can we delete this? + if len(pkg.CompiledGoFiles) == 0 { + pkg.CompiledGoFiles = pkg.GoFiles + } + + if p.Error != nil { + pkg.Errors = append(pkg.Errors, Error{ + Pos: p.Error.Pos, + Msg: p.Error.Err, + }) + } + + response.Packages = append(response.Packages, pkg) + } + + return &response, nil +} + +// absJoin absolutizes and flattens the lists of files. 
+func absJoin(dir string, fileses ...[]string) (res []string) { + for _, files := range fileses { + for _, file := range files { + if !filepath.IsAbs(file) { + file = filepath.Join(dir, file) + } + res = append(res, file) + } + } + return res +} + +func golistargs(cfg *Config, words []string) []string { + const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo + fullargs := []string{ + "list", "-e", "-json", + fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypesInfo|NeedTypesSizes) != 0), + fmt.Sprintf("-test=%t", cfg.Tests), + fmt.Sprintf("-export=%t", usesExportData(cfg)), + fmt.Sprintf("-deps=%t", cfg.Mode&NeedDeps != 0), + // go list doesn't let you pass -test and -find together, + // probably because you'd just get the TestMain. + fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0), + } + fullargs = append(fullargs, cfg.BuildFlags...) + fullargs = append(fullargs, "--") + fullargs = append(fullargs, words...) + return fullargs +} + +// invokeGo returns the stdout of a go command invocation. +func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) { + stdout := new(bytes.Buffer) + stderr := new(bytes.Buffer) + cmd := exec.CommandContext(cfg.Context, "go", args...) + // On darwin the cwd gets resolved to the real path, which breaks anything that + // expects the working directory to keep the original path, including the + // go command when dealing with modules. + // The Go stdlib has a special feature where if the cwd and the PWD are the + // same node then it trusts the PWD, so by setting it in the env for the child + // process we fix up all the paths returned by the go command. 
+ cmd.Env = append(append([]string{}, cfg.Env...), "PWD="+cfg.Dir) + cmd.Dir = cfg.Dir + cmd.Stdout = stdout + cmd.Stderr = stderr + if debug { + defer func(start time.Time) { + log.Printf("%s for %v, stderr: <<%s>>\n", time.Since(start), cmdDebugStr(cmd, args...), stderr) + }(time.Now()) + } + + if err := cmd.Run(); err != nil { + // Check for 'go' executable not being found. + if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound { + return nil, fmt.Errorf("'go list' driver requires 'go', but %s", exec.ErrNotFound) + } + + exitErr, ok := err.(*exec.ExitError) + if !ok { + // Catastrophic error: + // - context cancellation + return nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err) + } + + // Old go version? + if strings.Contains(stderr.String(), "flag provided but not defined") { + return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)} + } + + // This error only appears in stderr. See golang.org/cl/166398 for a fix in go list to show + // the error in the Err section of stdout in case -e option is provided. + // This fix is provided for backwards compatibility. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must be .go files") { + output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Workaround for #29280: go list -e has incorrect behavior when an ad-hoc package doesn't exist. 
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no such file or directory") { + output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Workaround for an instance of golang.org/issue/26755: go list -e will return a non-zero exit + // status if there's a dependency on a package that doesn't exist. But it should return + // a zero exit status and set an error on that package. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no Go files in") { + // try to extract package name from string + stderrStr := stderr.String() + var importPath string + colon := strings.Index(stderrStr, ":") + if colon > 0 && strings.HasPrefix(stderrStr, "go build ") { + importPath = stderrStr[len("go build "):colon] + } + output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + importPath, strings.Trim(stderrStr, "\n")) + return bytes.NewBufferString(output), nil + } + + // Export mode entails a build. + // If that build fails, errors appear on stderr + // (despite the -e flag) and the Export field is blank. + // Do not fail in that case. + // The same is true if an ad-hoc package given to go list doesn't exist. + // TODO(matloob): Remove these once we can depend on go list to exit with a zero status with -e even when + // packages don't exist or a build fails. + if !usesExportData(cfg) && !containsGoFile(args) { + return nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr) + } + } + + // As of writing, go list -export prints some non-fatal compilation + // errors to stderr, even with -e set. We would prefer that it put + // them in the Package.Error JSON (see https://golang.org/issue/26319). + // In the meantime, there's nowhere good to put them, but they can + // be useful for debugging. Print them if $GOPACKAGESPRINTGOLISTERRORS + // is set. 
+ if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" { + fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, args...), stderr) + } + + // debugging + if false { + fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(cmd, args...), stdout) + } + + return stdout, nil +} + +func containsGoFile(s []string) bool { + for _, f := range s { + if strings.HasSuffix(f, ".go") { + return true + } + } + return false +} + +func cmdDebugStr(cmd *exec.Cmd, args ...string) string { + env := make(map[string]string) + for _, kv := range cmd.Env { + split := strings.Split(kv, "=") + k, v := split[0], split[1] + env[k] = v + } + var quotedArgs []string + for _, arg := range args { + quotedArgs = append(quotedArgs, strconv.Quote(arg)) + } + + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v PWD=%v go %s", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["PWD"], strings.Join(quotedArgs, " ")) +} diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go new file mode 100644 index 0000000000..ce322ce5ed --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -0,0 +1,262 @@ +package packages + +import ( + "bytes" + "encoding/json" + "go/parser" + "go/token" + "path" + "path/filepath" + "strconv" + "strings" + "sync" +) + +// processGolistOverlay provides rudimentary support for adding +// files that don't exist on disk to an overlay. The results can be +// sometimes incorrect. 
+// TODO(matloob): Handle unsupported cases, including the following: +// - test files +// - adding test and non-test files to test variants of packages +// - determining the correct package to add given a new import path +// - creating packages that don't exist +func processGolistOverlay(cfg *Config, response *driverResponse) (modifiedPkgs, needPkgs []string, err error) { + havePkgs := make(map[string]string) // importPath -> non-test package ID + needPkgsSet := make(map[string]bool) + modifiedPkgsSet := make(map[string]bool) + + for _, pkg := range response.Packages { + // This is an approximation of import path to id. This can be + // wrong for tests, vendored packages, and a number of other cases. + havePkgs[pkg.PkgPath] = pkg.ID + } + + var rootDirs map[string]string + var onceGetRootDirs sync.Once + + for opath, contents := range cfg.Overlay { + base := filepath.Base(opath) + if strings.HasSuffix(opath, "_test.go") { + // Overlays don't support adding new test files yet. + // TODO(matloob): support adding new test files. + continue + } + dir := filepath.Dir(opath) + var pkg *Package + var fileExists bool + for _, p := range response.Packages { + for _, f := range p.GoFiles { + if !sameFile(filepath.Dir(f), dir) { + continue + } + pkg = p + if filepath.Base(f) == base { + fileExists = true + } + } + } + // The overlay could have included an entirely new package. + if pkg == nil { + onceGetRootDirs.Do(func() { + rootDirs = determineRootDirs(cfg) + }) + // Try to find the module or gopath dir the file is contained in. + // Then for modules, add the module opath to the beginning. + var pkgPath string + for rdir, rpath := range rootDirs { + // TODO(matloob): This doesn't properly handle symlinks. + r, err := filepath.Rel(rdir, dir) + if err != nil { + continue + } + pkgPath = filepath.ToSlash(r) + if rpath != "" { + pkgPath = path.Join(rpath, pkgPath) + } + // We only create one new package even it can belong in multiple modules or GOPATH entries. 
+ // This is okay because tools (such as the LSP) that use overlays will recompute the overlay + // once the file is saved, and golist will do the right thing. + // TODO(matloob): Implement module tiebreaking? + break + } + if pkgPath == "" { + continue + } + pkgName, ok := extractPackageName(opath, contents) + if !ok { + continue + } + id := pkgPath + // Try to reclaim a package with the same id if it exists in the response. + for _, p := range response.Packages { + if reclaimPackage(p, id, opath, contents) { + pkg = p + break + } + } + // Otherwise, create a new package + if pkg == nil { + pkg = &Package{PkgPath: pkgPath, ID: id, Name: pkgName, Imports: make(map[string]*Package)} + // TODO(matloob): Is it okay to amend response.Packages this way? + response.Packages = append(response.Packages, pkg) + havePkgs[pkg.PkgPath] = id + } + } + if !fileExists { + pkg.GoFiles = append(pkg.GoFiles, opath) + // TODO(matloob): Adding the file to CompiledGoFiles can exhibit the wrong behavior + // if the file will be ignored due to its build tags. + pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, opath) + modifiedPkgsSet[pkg.ID] = true + } + imports, err := extractImports(opath, contents) + if err != nil { + // Let the parser or type checker report errors later. + continue + } + for _, imp := range imports { + _, found := pkg.Imports[imp] + if !found { + // TODO(matloob): Handle cases when the following block isn't correct. + // These include imports of test variants, imports of vendored packages, etc. + id, ok := havePkgs[imp] + if !ok { + id = imp + } + pkg.Imports[imp] = &Package{ID: id} + } + } + continue + } + + // toPkgPath tries to guess the package path given the id. + // This isn't always correct -- it's certainly wrong for + // vendored packages' paths. + toPkgPath := func(id string) string { + // TODO(matloob): Handle vendor paths. 
+ i := strings.IndexByte(id, ' ') + if i >= 0 { + return id[:i] + } + return id + } + + // Do another pass now that new packages have been created to determine the + // set of missing packages. + for _, pkg := range response.Packages { + for _, imp := range pkg.Imports { + pkgPath := toPkgPath(imp.ID) + if _, ok := havePkgs[pkgPath]; !ok { + needPkgsSet[pkgPath] = true + } + } + } + + needPkgs = make([]string, 0, len(needPkgsSet)) + for pkg := range needPkgsSet { + needPkgs = append(needPkgs, pkg) + } + modifiedPkgs = make([]string, 0, len(modifiedPkgsSet)) + for pkg := range modifiedPkgsSet { + modifiedPkgs = append(modifiedPkgs, pkg) + } + return modifiedPkgs, needPkgs, err +} + +// determineRootDirs returns a mapping from directories code can be contained in to the +// corresponding import path prefixes of those directories. +// Its result is used to try to determine the import path for a package containing +// an overlay file. +func determineRootDirs(cfg *Config) map[string]string { + // Assume modules first: + out, err := invokeGo(cfg, "list", "-m", "-json", "all") + if err != nil { + return determineRootDirsGOPATH(cfg) + } + m := map[string]string{} + type jsonMod struct{ Path, Dir string } + for dec := json.NewDecoder(out); dec.More(); { + mod := new(jsonMod) + if err := dec.Decode(mod); err != nil { + return m // Give up and return an empty map. Package won't be found for overlay. + } + if mod.Dir != "" && mod.Path != "" { + // This is a valid module; add it to the map. + m[mod.Dir] = mod.Path + } + } + return m +} + +func determineRootDirsGOPATH(cfg *Config) map[string]string { + m := map[string]string{} + out, err := invokeGo(cfg, "env", "GOPATH") + if err != nil { + // Could not determine root dir mapping. Everything is best-effort, so just return an empty map. + // When we try to find the import path for a directory, there will be no root-dir match and + // we'll give up. 
+ return m + } + for _, p := range filepath.SplitList(string(bytes.TrimSpace(out.Bytes()))) { + m[filepath.Join(p, "src")] = "" + } + return m +} + +func extractImports(filename string, contents []byte) ([]string, error) { + f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.ImportsOnly) // TODO(matloob): reuse fileset? + if err != nil { + return nil, err + } + var res []string + for _, imp := range f.Imports { + quotedPath := imp.Path.Value + path, err := strconv.Unquote(quotedPath) + if err != nil { + return nil, err + } + res = append(res, path) + } + return res, nil +} + +// reclaimPackage attempts to reuse a package that failed to load in an overlay. +// +// If the package has errors and has no Name, GoFiles, or Imports, +// then it's possible that it doesn't yet exist on disk. +func reclaimPackage(pkg *Package, id string, filename string, contents []byte) bool { + // TODO(rstambler): Check the message of the actual error? + // It differs between $GOPATH and module mode. + if pkg.ID != id { + return false + } + if len(pkg.Errors) != 1 { + return false + } + if pkg.Name != "" || pkg.ExportFile != "" { + return false + } + if len(pkg.GoFiles) > 0 || len(pkg.CompiledGoFiles) > 0 || len(pkg.OtherFiles) > 0 { + return false + } + if len(pkg.Imports) > 0 { + return false + } + pkgName, ok := extractPackageName(filename, contents) + if !ok { + return false + } + pkg.Name = pkgName + pkg.Errors = nil + return true +} + +func extractPackageName(filename string, contents []byte) (string, bool) { + // TODO(rstambler): Check the message of the actual error? + // It differs between $GOPATH and module mode. + f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.PackageClauseOnly) // TODO(matloob): reuse fileset? 
+ if err != nil { + return "", false + } + return f.Name.Name, true +} diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go new file mode 100644 index 0000000000..cd151469a2 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -0,0 +1,1071 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +// See doc.go for package documentation and implementation notes. + +import ( + "context" + "encoding/json" + "fmt" + "go/ast" + "go/parser" + "go/scanner" + "go/token" + "go/types" + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" + "sync" + + "golang.org/x/tools/go/gcexportdata" +) + +// A LoadMode controls the amount of detail to return when loading. +// The bits below can be combined to specify which fields should be +// filled in the result packages. +// The zero value is a special case, equivalent to combining +// the NeedName, NeedFiles, and NeedCompiledGoFiles bits. +// ID and Errors (if present) will always be filled. +// Load may return more information than requested. +type LoadMode int + +const ( + // NeedName adds Name and PkgPath. + NeedName LoadMode = 1 << iota + + // NeedFiles adds GoFiles and OtherFiles. + NeedFiles + + // NeedCompiledGoFiles adds CompiledGoFiles. + NeedCompiledGoFiles + + // NeedImports adds Imports. If NeedDeps is not set, the Imports field will contain + // "placeholder" Packages with only the ID set. + NeedImports + + // NeedDeps adds the fields requested by the LoadMode in the packages in Imports. If NeedImports + // is not set NeedDeps has no effect. + NeedDeps + + // NeedExportsFile adds ExportsFile. + NeedExportsFile + + // NeedTypes adds Types, Fset, and IllTyped. + NeedTypes + + // NeedSyntax adds Syntax. + NeedSyntax + + // NeedTypesInfo adds TypesInfo. + NeedTypesInfo + + // NeedTypesSizes adds TypesSizes. 
+ NeedTypesSizes +) + +const ( + // Deprecated: LoadFiles exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles + + // Deprecated: LoadImports exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadImports = LoadFiles | NeedImports | NeedDeps + + // Deprecated: LoadTypes exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadTypes = LoadImports | NeedTypes | NeedTypesSizes + + // Deprecated: LoadSyntax exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo + + // Deprecated: LoadAllSyntax exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadAllSyntax = LoadSyntax +) + +// A Config specifies details about how packages should be loaded. +// The zero value is a valid configuration. +// Calls to Load do not modify this struct. +type Config struct { + // Mode controls the level of information returned for each package. + Mode LoadMode + + // Context specifies the context for the load operation. + // If the context is cancelled, the loader may stop early + // and return an ErrCancelled error. + // If Context is nil, the load cannot be cancelled. + Context context.Context + + // Dir is the directory in which to run the build system's query tool + // that provides information about the packages. + // If Dir is empty, the tool is run in the current directory. + Dir string + + // Env is the environment to use when invoking the build system's query tool. + // If Env is nil, the current environment is used. 
+ // As in os/exec's Cmd, only the last value in the slice for + // each environment key is used. To specify the setting of only + // a few variables, append to the current environment, as in: + // + // opt.Env = append(os.Environ(), "GOOS=plan9", "GOARCH=386") + // + Env []string + + // BuildFlags is a list of command-line flags to be passed through to + // the build system's query tool. + BuildFlags []string + + // Fset provides source position information for syntax trees and types. + // If Fset is nil, Load will use a new fileset, but preserve Fset's value. + Fset *token.FileSet + + // ParseFile is called to read and parse each file + // when preparing a package's type-checked syntax tree. + // It must be safe to call ParseFile simultaneously from multiple goroutines. + // If ParseFile is nil, the loader will uses parser.ParseFile. + // + // ParseFile should parse the source from src and use filename only for + // recording position information. + // + // An application may supply a custom implementation of ParseFile + // to change the effective file contents or the behavior of the parser, + // or to modify the syntax tree. For example, selectively eliminating + // unwanted function bodies can significantly accelerate type checking. + ParseFile func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) + + // If Tests is set, the loader includes not just the packages + // matching a particular pattern but also any related test packages, + // including test-only variants of the package and the test executable. + // + // For example, when using the go command, loading "fmt" with Tests=true + // returns four packages, with IDs "fmt" (the standard package), + // "fmt [fmt.test]" (the package as compiled for the test), + // "fmt_test" (the test functions from source files in package fmt_test), + // and "fmt.test" (the test binary). + // + // In build systems with explicit names for tests, + // setting Tests may have no effect. 
+ Tests bool + + // Overlay provides a mapping of absolute file paths to file contents. + // If the file with the given path already exists, the parser will use the + // alternative file contents provided by the map. + // + // Overlays provide incomplete support for when a given file doesn't + // already exist on disk. See the package doc above for more details. + Overlay map[string][]byte +} + +// driver is the type for functions that query the build system for the +// packages named by the patterns. +type driver func(cfg *Config, patterns ...string) (*driverResponse, error) + +// driverResponse contains the results for a driver query. +type driverResponse struct { + // Sizes, if not nil, is the types.Sizes to use when type checking. + Sizes *types.StdSizes + + // Roots is the set of package IDs that make up the root packages. + // We have to encode this separately because when we encode a single package + // we cannot know if it is one of the roots as that requires knowledge of the + // graph it is part of. + Roots []string `json:",omitempty"` + + // Packages is the full set of packages in the graph. + // The packages are not connected into a graph. + // The Imports if populated will be stubs that only have their ID set. + // Imports will be connected and then type and syntax information added in a + // later pass (see refine). + Packages []*Package +} + +// Load loads and returns the Go packages named by the given patterns. +// +// Config specifies loading options; +// nil behaves the same as an empty Config. +// +// Load returns an error if any of the patterns was invalid +// as defined by the underlying build system. +// It may return an empty list of packages without an error, +// for instance for an empty expansion of a valid wildcard. +// Errors associated with a particular package are recorded in the +// corresponding Package's Errors list, and do not cause Load to +// return an error. 
Clients may need to handle such errors before +// proceeding with further analysis. The PrintErrors function is +// provided for convenient display of all errors. +func Load(cfg *Config, patterns ...string) ([]*Package, error) { + l := newLoader(cfg) + response, err := defaultDriver(&l.Config, patterns...) + if err != nil { + return nil, err + } + l.sizes = response.Sizes + return l.refine(response.Roots, response.Packages...) +} + +// defaultDriver is a driver that looks for an external driver binary, and if +// it does not find it falls back to the built in go list driver. +func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, error) { + driver := findExternalDriver(cfg) + if driver == nil { + driver = goListDriver + } + return driver(cfg, patterns...) +} + +// A Package describes a loaded Go package. +type Package struct { + // ID is a unique identifier for a package, + // in a syntax provided by the underlying build system. + // + // Because the syntax varies based on the build system, + // clients should treat IDs as opaque and not attempt to + // interpret them. + ID string + + // Name is the package name as it appears in the package source code. + Name string + + // PkgPath is the package path as used by the go/types package. + PkgPath string + + // Errors contains any errors encountered querying the metadata + // of the package, or while parsing or type-checking its files. + Errors []Error + + // GoFiles lists the absolute file paths of the package's Go source files. + GoFiles []string + + // CompiledGoFiles lists the absolute file paths of the package's source + // files that were presented to the compiler. + // This may differ from GoFiles if files are processed before compilation. + CompiledGoFiles []string + + // OtherFiles lists the absolute file paths of the package's non-Go source files, + // including assembly, C, C++, Fortran, Objective-C, SWIG, and so on. 
+ OtherFiles []string + + // ExportFile is the absolute path to a file containing type + // information for the package as provided by the build system. + ExportFile string + + // Imports maps import paths appearing in the package's Go source files + // to corresponding loaded Packages. + Imports map[string]*Package + + // Types provides type information for the package. + // The NeedTypes LoadMode bit sets this field for packages matching the + // patterns; type information for dependencies may be missing or incomplete, + // unless NeedDeps and NeedImports are also set. + Types *types.Package + + // Fset provides position information for Types, TypesInfo, and Syntax. + // It is set only when Types is set. + Fset *token.FileSet + + // IllTyped indicates whether the package or any dependency contains errors. + // It is set only when Types is set. + IllTyped bool + + // Syntax is the package's syntax trees, for the files listed in CompiledGoFiles. + // + // The NeedSyntax LoadMode bit populates this field for packages matching the patterns. + // If NeedDeps and NeedImports are also set, this field will also be populated + // for dependencies. + Syntax []*ast.File + + // TypesInfo provides type information about the package's syntax trees. + // It is set only when Syntax is set. + TypesInfo *types.Info + + // TypesSizes provides the effective size function for types in TypesInfo. + TypesSizes types.Sizes +} + +// An Error describes a problem with a package's metadata, syntax, or types. +type Error struct { + Pos string // "file:line:col" or "file:line" or "" or "-" + Msg string + Kind ErrorKind +} + +// ErrorKind describes the source of the error, allowing the user to +// differentiate between errors generated by the driver, the parser, or the +// type-checker. 
+type ErrorKind int + +const ( + UnknownError ErrorKind = iota + ListError + ParseError + TypeError +) + +func (err Error) Error() string { + pos := err.Pos + if pos == "" { + pos = "-" // like token.Position{}.String() + } + return pos + ": " + err.Msg +} + +// flatPackage is the JSON form of Package +// It drops all the type and syntax fields, and transforms the Imports +// +// TODO(adonovan): identify this struct with Package, effectively +// publishing the JSON protocol. +type flatPackage struct { + ID string + Name string `json:",omitempty"` + PkgPath string `json:",omitempty"` + Errors []Error `json:",omitempty"` + GoFiles []string `json:",omitempty"` + CompiledGoFiles []string `json:",omitempty"` + OtherFiles []string `json:",omitempty"` + ExportFile string `json:",omitempty"` + Imports map[string]string `json:",omitempty"` +} + +// MarshalJSON returns the Package in its JSON form. +// For the most part, the structure fields are written out unmodified, and +// the type and syntax fields are skipped. +// The imports are written out as just a map of path to package id. +// The errors are written using a custom type that tries to preserve the +// structure of error types we know about. +// +// This method exists to enable support for additional build systems. It is +// not intended for use by clients of the API and we may change the format. +func (p *Package) MarshalJSON() ([]byte, error) { + flat := &flatPackage{ + ID: p.ID, + Name: p.Name, + PkgPath: p.PkgPath, + Errors: p.Errors, + GoFiles: p.GoFiles, + CompiledGoFiles: p.CompiledGoFiles, + OtherFiles: p.OtherFiles, + ExportFile: p.ExportFile, + } + if len(p.Imports) > 0 { + flat.Imports = make(map[string]string, len(p.Imports)) + for path, ipkg := range p.Imports { + flat.Imports[path] = ipkg.ID + } + } + return json.Marshal(flat) +} + +// UnmarshalJSON reads in a Package from its JSON format. +// See MarshalJSON for details about the format accepted. 
+func (p *Package) UnmarshalJSON(b []byte) error { + flat := &flatPackage{} + if err := json.Unmarshal(b, &flat); err != nil { + return err + } + *p = Package{ + ID: flat.ID, + Name: flat.Name, + PkgPath: flat.PkgPath, + Errors: flat.Errors, + GoFiles: flat.GoFiles, + CompiledGoFiles: flat.CompiledGoFiles, + OtherFiles: flat.OtherFiles, + ExportFile: flat.ExportFile, + } + if len(flat.Imports) > 0 { + p.Imports = make(map[string]*Package, len(flat.Imports)) + for path, id := range flat.Imports { + p.Imports[path] = &Package{ID: id} + } + } + return nil +} + +func (p *Package) String() string { return p.ID } + +// loaderPackage augments Package with state used during the loading phase +type loaderPackage struct { + *Package + importErrors map[string]error // maps each bad import to its error + loadOnce sync.Once + color uint8 // for cycle detection + needsrc bool // load from source (Mode >= LoadTypes) + needtypes bool // type information is either requested or depended on + initial bool // package was matched by a pattern +} + +// loader holds the working state of a single call to load. +type loader struct { + pkgs map[string]*loaderPackage + Config + sizes types.Sizes + parseCache map[string]*parseValue + parseCacheMu sync.Mutex + exportMu sync.Mutex // enforces mutual exclusion of exportdata operations + + // TODO(matloob): Add an implied mode here and use that instead of mode. + // Implied mode would contain all the fields we need the data for so we can + // get the actually requested fields. We'll zero them out before returning + // packages to the user. This will make it easier for us to get the conditions + // where we need certain modes right. 
+} + +type parseValue struct { + f *ast.File + err error + ready chan struct{} +} + +func newLoader(cfg *Config) *loader { + ld := &loader{ + parseCache: map[string]*parseValue{}, + } + if cfg != nil { + ld.Config = *cfg + } + if ld.Config.Mode == 0 { + ld.Config.Mode = NeedName | NeedFiles | NeedCompiledGoFiles // Preserve zero behavior of Mode for backwards compatibility. + } + if ld.Config.Env == nil { + ld.Config.Env = os.Environ() + } + if ld.Context == nil { + ld.Context = context.Background() + } + if ld.Dir == "" { + if dir, err := os.Getwd(); err == nil { + ld.Dir = dir + } + } + + if ld.Mode&NeedTypes != 0 { + if ld.Fset == nil { + ld.Fset = token.NewFileSet() + } + + // ParseFile is required even in LoadTypes mode + // because we load source if export data is missing. + if ld.ParseFile == nil { + ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) { + const mode = parser.AllErrors | parser.ParseComments + return parser.ParseFile(fset, filename, src, mode) + } + } + } + return ld +} + +// refine connects the supplied packages into a graph and then adds type and +// and syntax information as requested by the LoadMode. +func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { + rootMap := make(map[string]int, len(roots)) + for i, root := range roots { + rootMap[root] = i + } + ld.pkgs = make(map[string]*loaderPackage) + // first pass, fixup and build the map and roots + var initial = make([]*loaderPackage, len(roots)) + for _, pkg := range list { + rootIndex := -1 + if i, found := rootMap[pkg.ID]; found { + rootIndex = i + } + lpkg := &loaderPackage{ + Package: pkg, + needtypes: (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && rootIndex < 0) || rootIndex >= 0, + needsrc: (ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && rootIndex < 0) || rootIndex >= 0 || + len(ld.Overlay) > 0 || // Overlays can invalidate export data. 
TODO(matloob): make this check fine-grained based on dependencies on overlaid files + pkg.ExportFile == "" && pkg.PkgPath != "unsafe", + } + ld.pkgs[lpkg.ID] = lpkg + if rootIndex >= 0 { + initial[rootIndex] = lpkg + lpkg.initial = true + } + } + for i, root := range roots { + if initial[i] == nil { + return nil, fmt.Errorf("root package %v is missing", root) + } + } + + // Materialize the import graph. + + const ( + white = 0 // new + grey = 1 // in progress + black = 2 // complete + ) + + // visit traverses the import graph, depth-first, + // and materializes the graph as Packages.Imports. + // + // Valid imports are saved in the Packages.Import map. + // Invalid imports (cycles and missing nodes) are saved in the importErrors map. + // Thus, even in the presence of both kinds of errors, the Import graph remains a DAG. + // + // visit returns whether the package needs src or has a transitive + // dependency on a package that does. These are the only packages + // for which we load source code. 
+ var stack []*loaderPackage + var visit func(lpkg *loaderPackage) bool + var srcPkgs []*loaderPackage + visit = func(lpkg *loaderPackage) bool { + switch lpkg.color { + case black: + return lpkg.needsrc + case grey: + panic("internal error: grey node") + } + lpkg.color = grey + stack = append(stack, lpkg) // push + stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports + lpkg.Imports = make(map[string]*Package, len(stubs)) + for importPath, ipkg := range stubs { + var importErr error + imp := ld.pkgs[ipkg.ID] + if imp == nil { + // (includes package "C" when DisableCgo) + importErr = fmt.Errorf("missing package: %q", ipkg.ID) + } else if imp.color == grey { + importErr = fmt.Errorf("import cycle: %s", stack) + } + if importErr != nil { + if lpkg.importErrors == nil { + lpkg.importErrors = make(map[string]error) + } + lpkg.importErrors[importPath] = importErr + continue + } + + if visit(imp) { + lpkg.needsrc = true + } + lpkg.Imports[importPath] = imp.Package + } + if lpkg.needsrc { + srcPkgs = append(srcPkgs, lpkg) + } + if ld.Mode&NeedTypesSizes != 0 { + lpkg.TypesSizes = ld.sizes + } + stack = stack[:len(stack)-1] // pop + lpkg.color = black + + return lpkg.needsrc + } + + if ld.Mode&(NeedImports|NeedDeps) == 0 { + // We do this to drop the stub import packages that we are not even going to try to resolve. + for _, lpkg := range initial { + lpkg.Imports = nil + } + } else { + // For each initial package, create its import DAG. + for _, lpkg := range initial { + visit(lpkg) + } + } + if ld.Mode&NeedDeps != 0 { // TODO(matloob): This is only the case if NeedTypes is also set, right? + for _, lpkg := range srcPkgs { + // Complete type information is required for the + // immediate dependencies of each source package. + for _, ipkg := range lpkg.Imports { + imp := ld.pkgs[ipkg.ID] + imp.needtypes = true + } + } + } + // Load type data if needed, starting at + // the initial packages (roots of the import DAG). 
+ if ld.Mode&NeedTypes != 0 { + var wg sync.WaitGroup + for _, lpkg := range initial { + wg.Add(1) + go func(lpkg *loaderPackage) { + ld.loadRecursive(lpkg) + wg.Done() + }(lpkg) + } + wg.Wait() + } + + result := make([]*Package, len(initial)) + importPlaceholders := make(map[string]*Package) + for i, lpkg := range initial { + result[i] = lpkg.Package + } + for i := range ld.pkgs { + // Clear all unrequested fields, for extra de-Hyrum-ization. + if ld.Mode&NeedName == 0 { + ld.pkgs[i].Name = "" + ld.pkgs[i].PkgPath = "" + } + if ld.Mode&NeedFiles == 0 { + ld.pkgs[i].GoFiles = nil + ld.pkgs[i].OtherFiles = nil + } + if ld.Mode&NeedCompiledGoFiles == 0 { + ld.pkgs[i].CompiledGoFiles = nil + } + if ld.Mode&NeedImports == 0 { + ld.pkgs[i].Imports = nil + } + if ld.Mode&NeedExportsFile == 0 { + ld.pkgs[i].ExportFile = "" + } + if ld.Mode&NeedTypes == 0 { + ld.pkgs[i].Types = nil + ld.pkgs[i].Fset = nil + ld.pkgs[i].IllTyped = false + } + if ld.Mode&NeedSyntax == 0 { + ld.pkgs[i].Syntax = nil + } + if ld.Mode&NeedTypesInfo == 0 { + ld.pkgs[i].TypesInfo = nil + } + if ld.Mode&NeedTypesSizes == 0 { + ld.pkgs[i].TypesSizes = nil + } + if ld.Mode&NeedDeps == 0 { + for j, pkg := range ld.pkgs[i].Imports { + ph, ok := importPlaceholders[pkg.ID] + if !ok { + ph = &Package{ID: pkg.ID} + importPlaceholders[pkg.ID] = ph + } + ld.pkgs[i].Imports[j] = ph + } + } + } + return result, nil +} + +// loadRecursive loads the specified package and its dependencies, +// recursively, in parallel, in topological order. +// It is atomic and idempotent. +// Precondition: ld.Mode&NeedTypes. +func (ld *loader) loadRecursive(lpkg *loaderPackage) { + lpkg.loadOnce.Do(func() { + // Load the direct dependencies, in parallel. + var wg sync.WaitGroup + for _, ipkg := range lpkg.Imports { + imp := ld.pkgs[ipkg.ID] + wg.Add(1) + go func(imp *loaderPackage) { + ld.loadRecursive(imp) + wg.Done() + }(imp) + } + wg.Wait() + + ld.loadPackage(lpkg) + }) +} + +// loadPackage loads the specified package. 
+// It must be called only once per Package, +// after immediate dependencies are loaded. +// Precondition: ld.Mode & NeedTypes. +func (ld *loader) loadPackage(lpkg *loaderPackage) { + if lpkg.PkgPath == "unsafe" { + // Fill in the blanks to avoid surprises. + lpkg.Types = types.Unsafe + lpkg.Fset = ld.Fset + lpkg.Syntax = []*ast.File{} + lpkg.TypesInfo = new(types.Info) + lpkg.TypesSizes = ld.sizes + return + } + + // Call NewPackage directly with explicit name. + // This avoids skew between golist and go/types when the files' + // package declarations are inconsistent. + lpkg.Types = types.NewPackage(lpkg.PkgPath, lpkg.Name) + lpkg.Fset = ld.Fset + + // Subtle: we populate all Types fields with an empty Package + // before loading export data so that export data processing + // never has to create a types.Package for an indirect dependency, + // which would then require that such created packages be explicitly + // inserted back into the Import graph as a final step after export data loading. + // The Diamond test exercises this case. + if !lpkg.needtypes { + return + } + if !lpkg.needsrc { + ld.loadFromExportData(lpkg) + return // not a source package, don't get syntax trees + } + + appendError := func(err error) { + // Convert various error types into the one true Error. + var errs []Error + switch err := err.(type) { + case Error: + // from driver + errs = append(errs, err) + + case *os.PathError: + // from parser + errs = append(errs, Error{ + Pos: err.Path + ":1", + Msg: err.Err.Error(), + Kind: ParseError, + }) + + case scanner.ErrorList: + // from parser + for _, err := range err { + errs = append(errs, Error{ + Pos: err.Pos.String(), + Msg: err.Msg, + Kind: ParseError, + }) + } + + case types.Error: + // from type checker + errs = append(errs, Error{ + Pos: err.Fset.Position(err.Pos).String(), + Msg: err.Msg, + Kind: TypeError, + }) + + default: + // unexpected impoverished error from parser? 
+ errs = append(errs, Error{ + Pos: "-", + Msg: err.Error(), + Kind: UnknownError, + }) + + // If you see this error message, please file a bug. + log.Printf("internal error: error %q (%T) without position", err, err) + } + + lpkg.Errors = append(lpkg.Errors, errs...) + } + + files, errs := ld.parseFiles(lpkg.CompiledGoFiles) + for _, err := range errs { + appendError(err) + } + + lpkg.Syntax = files + + lpkg.TypesInfo = &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + } + lpkg.TypesSizes = ld.sizes + + importer := importerFunc(func(path string) (*types.Package, error) { + if path == "unsafe" { + return types.Unsafe, nil + } + + // The imports map is keyed by import path. + ipkg := lpkg.Imports[path] + if ipkg == nil { + if err := lpkg.importErrors[path]; err != nil { + return nil, err + } + // There was skew between the metadata and the + // import declarations, likely due to an edit + // race, or because the ParseFile feature was + // used to supply alternative file contents. + return nil, fmt.Errorf("no metadata for %s", path) + } + + if ipkg.Types != nil && ipkg.Types.Complete() { + return ipkg.Types, nil + } + log.Fatalf("internal error: nil Pkg importing %q from %q", path, lpkg) + panic("unreachable") + }) + + // type-check + tc := &types.Config{ + Importer: importer, + + // Type-check bodies of functions only in non-initial packages. + // Example: for import graph A->B->C and initial packages {A,C}, + // we can ignore function bodies in B. 
+ IgnoreFuncBodies: (ld.Mode&(NeedDeps|NeedTypesInfo) == 0) && !lpkg.initial, + + Error: appendError, + Sizes: ld.sizes, + } + types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax) + + lpkg.importErrors = nil // no longer needed + + // If !Cgo, the type-checker uses FakeImportC mode, so + // it doesn't invoke the importer for import "C", + // nor report an error for the import, + // or for any undefined C.f reference. + // We must detect this explicitly and correctly + // mark the package as IllTyped (by reporting an error). + // TODO(adonovan): if these errors are annoying, + // we could just set IllTyped quietly. + if tc.FakeImportC { + outer: + for _, f := range lpkg.Syntax { + for _, imp := range f.Imports { + if imp.Path.Value == `"C"` { + err := types.Error{Fset: ld.Fset, Pos: imp.Pos(), Msg: `import "C" ignored`} + appendError(err) + break outer + } + } + } + } + + // Record accumulated errors. + illTyped := len(lpkg.Errors) > 0 + if !illTyped { + for _, imp := range lpkg.Imports { + if imp.IllTyped { + illTyped = true + break + } + } + } + lpkg.IllTyped = illTyped +} + +// An importFunc is an implementation of the single-method +// types.Importer interface based on a function value. +type importerFunc func(path string) (*types.Package, error) + +func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } + +// We use a counting semaphore to limit +// the number of parallel I/O calls per process. 
+var ioLimit = make(chan bool, 20) + +func (ld *loader) parseFile(filename string) (*ast.File, error) { + ld.parseCacheMu.Lock() + v, ok := ld.parseCache[filename] + if ok { + // cache hit + ld.parseCacheMu.Unlock() + <-v.ready + } else { + // cache miss + v = &parseValue{ready: make(chan struct{})} + ld.parseCache[filename] = v + ld.parseCacheMu.Unlock() + + var src []byte + for f, contents := range ld.Config.Overlay { + if sameFile(f, filename) { + src = contents + } + } + var err error + if src == nil { + ioLimit <- true // wait + src, err = ioutil.ReadFile(filename) + <-ioLimit // signal + } + if err != nil { + v.err = err + } else { + v.f, v.err = ld.ParseFile(ld.Fset, filename, src) + } + + close(v.ready) + } + return v.f, v.err +} + +// parseFiles reads and parses the Go source files and returns the ASTs +// of the ones that could be at least partially parsed, along with a +// list of I/O and parse errors encountered. +// +// Because files are scanned in parallel, the token.Pos +// positions of the resulting ast.Files are not ordered. +// +func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) { + var wg sync.WaitGroup + n := len(filenames) + parsed := make([]*ast.File, n) + errors := make([]error, n) + for i, file := range filenames { + if ld.Config.Context.Err() != nil { + parsed[i] = nil + errors[i] = ld.Config.Context.Err() + continue + } + wg.Add(1) + go func(i int, filename string) { + parsed[i], errors[i] = ld.parseFile(filename) + wg.Done() + }(i, file) + } + wg.Wait() + + // Eliminate nils, preserving order. + var o int + for _, f := range parsed { + if f != nil { + parsed[o] = f + o++ + } + } + parsed = parsed[:o] + + o = 0 + for _, err := range errors { + if err != nil { + errors[o] = err + o++ + } + } + errors = errors[:o] + + return parsed, errors +} + +// sameFile returns true if x and y have the same basename and denote +// the same file. 
+// +func sameFile(x, y string) bool { + if x == y { + // It could be the case that y doesn't exist. + // For instance, it may be an overlay file that + // hasn't been written to disk. To handle that case + // let x == y through. (We added the exact absolute path + // string to the CompiledGoFiles list, so the unwritten + // overlay case implies x==y.) + return true + } + if strings.EqualFold(filepath.Base(x), filepath.Base(y)) { // (optimisation) + if xi, err := os.Stat(x); err == nil { + if yi, err := os.Stat(y); err == nil { + return os.SameFile(xi, yi) + } + } + } + return false +} + +// loadFromExportData returns type information for the specified +// package, loading it from an export data file on the first request. +func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error) { + if lpkg.PkgPath == "" { + log.Fatalf("internal error: Package %s has no PkgPath", lpkg) + } + + // Because gcexportdata.Read has the potential to create or + // modify the types.Package for each node in the transitive + // closure of dependencies of lpkg, all exportdata operations + // must be sequential. (Finer-grained locking would require + // changes to the gcexportdata API.) + // + // The exportMu lock guards the Package.Pkg field and the + // types.Package it points to, for each Package in the graph. + // + // Not all accesses to Package.Pkg need to be protected by exportMu: + // graph ordering ensures that direct dependencies of source + // packages are fully loaded before the importer reads their Pkg field. + ld.exportMu.Lock() + defer ld.exportMu.Unlock() + + if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() { + return tpkg, nil // cache hit + } + + lpkg.IllTyped = true // fail safe + + if lpkg.ExportFile == "" { + // Errors while building export data will have been printed to stderr. 
+ return nil, fmt.Errorf("no export data file") + } + f, err := os.Open(lpkg.ExportFile) + if err != nil { + return nil, err + } + defer f.Close() + + // Read gc export data. + // + // We don't currently support gccgo export data because all + // underlying workspaces use the gc toolchain. (Even build + // systems that support gccgo don't use it for workspace + // queries.) + r, err := gcexportdata.NewReader(f) + if err != nil { + return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + } + + // Build the view. + // + // The gcexportdata machinery has no concept of package ID. + // It identifies packages by their PkgPath, which although not + // globally unique is unique within the scope of one invocation + // of the linker, type-checker, or gcexportdata. + // + // So, we must build a PkgPath-keyed view of the global + // (conceptually ID-keyed) cache of packages and pass it to + // gcexportdata. The view must contain every existing + // package that might possibly be mentioned by the + // current package---its transitive closure. + // + // In loadPackage, we unconditionally create a types.Package for + // each dependency so that export data loading does not + // create new ones. + // + // TODO(adonovan): it would be simpler and more efficient + // if the export data machinery invoked a callback to + // get-or-create a package instead of a map. + // + view := make(map[string]*types.Package) // view seen by gcexportdata + seen := make(map[*loaderPackage]bool) // all visited packages + var visit func(pkgs map[string]*Package) + visit = func(pkgs map[string]*Package) { + for _, p := range pkgs { + lpkg := ld.pkgs[p.ID] + if !seen[lpkg] { + seen[lpkg] = true + view[lpkg.PkgPath] = lpkg.Types + visit(lpkg.Imports) + } + } + } + visit(lpkg.Imports) + + viewLen := len(view) + 1 // adding the self package + // Parse the export data. + // (May modify incomplete packages in view but not create new ones.) 
+ tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath) + if err != nil { + return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + } + if viewLen != len(view) { + log.Fatalf("Unexpected package creation during export data loading") + } + + lpkg.Types = tpkg + lpkg.IllTyped = false + + return tpkg, nil +} + +func usesExportData(cfg *Config) bool { + return cfg.Mode&NeedExportsFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedTypesInfo == 0 +} diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go new file mode 100644 index 0000000000..b13cb081fc --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/visit.go @@ -0,0 +1,55 @@ +package packages + +import ( + "fmt" + "os" + "sort" +) + +// Visit visits all the packages in the import graph whose roots are +// pkgs, calling the optional pre function the first time each package +// is encountered (preorder), and the optional post function after a +// package's dependencies have been visited (postorder). +// The boolean result of pre(pkg) determines whether +// the imports of package pkg are visited. +func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) { + seen := make(map[*Package]bool) + var visit func(*Package) + visit = func(pkg *Package) { + if !seen[pkg] { + seen[pkg] = true + + if pre == nil || pre(pkg) { + paths := make([]string, 0, len(pkg.Imports)) + for path := range pkg.Imports { + paths = append(paths, path) + } + sort.Strings(paths) // Imports is a map, this makes visit stable + for _, path := range paths { + visit(pkg.Imports[path]) + } + } + + if post != nil { + post(pkg) + } + } + } + for _, pkg := range pkgs { + visit(pkg) + } +} + +// PrintErrors prints to os.Stderr the accumulated errors of all +// packages in the import graph rooted at pkgs, dependencies first. +// PrintErrors returns the number of errors printed. 
+func PrintErrors(pkgs []*Package) int { + var n int + Visit(pkgs, nil, func(pkg *Package) { + for _, err := range pkg.Errors { + fmt.Fprintln(os.Stderr, err) + n++ + } + }) + return n +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go new file mode 100644 index 0000000000..7219c8e9ff --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go @@ -0,0 +1,196 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package fastwalk provides a faster version of filepath.Walk for file system +// scanning tools. +package fastwalk + +import ( + "errors" + "os" + "path/filepath" + "runtime" + "sync" +) + +// TraverseLink is used as a return value from WalkFuncs to indicate that the +// symlink named in the call may be traversed. +var TraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory") + +// SkipFiles is a used as a return value from WalkFuncs to indicate that the +// callback should not be called for any other files in the current directory. +// Child directories will still be traversed. +var SkipFiles = errors.New("fastwalk: skip remaining files in directory") + +// Walk is a faster implementation of filepath.Walk. +// +// filepath.Walk's design necessarily calls os.Lstat on each file, +// even if the caller needs less info. +// Many tools need only the type of each file. +// On some platforms, this information is provided directly by the readdir +// system call, avoiding the need to stat each file individually. +// fastwalk_unix.go contains a fork of the syscall routines. +// +// See golang.org/issue/16399 +// +// Walk walks the file tree rooted at root, calling walkFn for +// each file or directory in the tree, including root. +// +// If fastWalk returns filepath.SkipDir, the directory is skipped. 
+// +// Unlike filepath.Walk: +// * file stat calls must be done by the user. +// The only provided metadata is the file type, which does not include +// any permission bits. +// * multiple goroutines stat the filesystem concurrently. The provided +// walkFn must be safe for concurrent use. +// * fastWalk can follow symlinks if walkFn returns the TraverseLink +// sentinel error. It is the walkFn's responsibility to prevent +// fastWalk from going into symlink cycles. +func Walk(root string, walkFn func(path string, typ os.FileMode) error) error { + // TODO(bradfitz): make numWorkers configurable? We used a + // minimum of 4 to give the kernel more info about multiple + // things we want, in hopes its I/O scheduling can take + // advantage of that. Hopefully most are in cache. Maybe 4 is + // even too low of a minimum. Profile more. + numWorkers := 4 + if n := runtime.NumCPU(); n > numWorkers { + numWorkers = n + } + + // Make sure to wait for all workers to finish, otherwise + // walkFn could still be called after returning. This Wait call + // runs after close(e.donec) below. 
+ var wg sync.WaitGroup + defer wg.Wait() + + w := &walker{ + fn: walkFn, + enqueuec: make(chan walkItem, numWorkers), // buffered for performance + workc: make(chan walkItem, numWorkers), // buffered for performance + donec: make(chan struct{}), + + // buffered for correctness & not leaking goroutines: + resc: make(chan error, numWorkers), + } + defer close(w.donec) + + for i := 0; i < numWorkers; i++ { + wg.Add(1) + go w.doWork(&wg) + } + todo := []walkItem{{dir: root}} + out := 0 + for { + workc := w.workc + var workItem walkItem + if len(todo) == 0 { + workc = nil + } else { + workItem = todo[len(todo)-1] + } + select { + case workc <- workItem: + todo = todo[:len(todo)-1] + out++ + case it := <-w.enqueuec: + todo = append(todo, it) + case err := <-w.resc: + out-- + if err != nil { + return err + } + if out == 0 && len(todo) == 0 { + // It's safe to quit here, as long as the buffered + // enqueue channel isn't also readable, which might + // happen if the worker sends both another unit of + // work and its result before the other select was + // scheduled and both w.resc and w.enqueuec were + // readable. + select { + case it := <-w.enqueuec: + todo = append(todo, it) + default: + return nil + } + } + } + } +} + +// doWork reads directories as instructed (via workc) and runs the +// user's callback function. 
+func (w *walker) doWork(wg *sync.WaitGroup) { + defer wg.Done() + for { + select { + case <-w.donec: + return + case it := <-w.workc: + select { + case <-w.donec: + return + case w.resc <- w.walk(it.dir, !it.callbackDone): + } + } + } +} + +type walker struct { + fn func(path string, typ os.FileMode) error + + donec chan struct{} // closed on fastWalk's return + workc chan walkItem // to workers + enqueuec chan walkItem // from workers + resc chan error // from workers +} + +type walkItem struct { + dir string + callbackDone bool // callback already called; don't do it again +} + +func (w *walker) enqueue(it walkItem) { + select { + case w.enqueuec <- it: + case <-w.donec: + } +} + +func (w *walker) onDirEnt(dirName, baseName string, typ os.FileMode) error { + joined := dirName + string(os.PathSeparator) + baseName + if typ == os.ModeDir { + w.enqueue(walkItem{dir: joined}) + return nil + } + + err := w.fn(joined, typ) + if typ == os.ModeSymlink { + if err == TraverseLink { + // Set callbackDone so we don't call it twice for both the + // symlink-as-symlink and the symlink-as-directory later: + w.enqueue(walkItem{dir: joined, callbackDone: true}) + return nil + } + if err == filepath.SkipDir { + // Permit SkipDir on symlinks too. + return nil + } + } + return err +} + +func (w *walker) walk(root string, runUserCallback bool) error { + if runUserCallback { + err := w.fn(root, os.ModeDir) + if err == filepath.SkipDir { + return nil + } + if err != nil { + return err + } + } + + return readDir(root, w.onDirEnt) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go new file mode 100644 index 0000000000..ccffec5adc --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go @@ -0,0 +1,13 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd openbsd netbsd + +package fastwalk + +import "syscall" + +func direntInode(dirent *syscall.Dirent) uint64 { + return uint64(dirent.Fileno) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go new file mode 100644 index 0000000000..ab7fbc0a9a --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin +// +build !appengine + +package fastwalk + +import "syscall" + +func direntInode(dirent *syscall.Dirent) uint64 { + return uint64(dirent.Ino) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go new file mode 100644 index 0000000000..a3b26a7bae --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go @@ -0,0 +1,13 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd openbsd netbsd + +package fastwalk + +import "syscall" + +func direntNamlen(dirent *syscall.Dirent) uint64 { + return uint64(dirent.Namlen) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go new file mode 100644 index 0000000000..e880d358b1 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux +// +build !appengine + +package fastwalk + +import ( + "bytes" + "syscall" + "unsafe" +) + +func direntNamlen(dirent *syscall.Dirent) uint64 { + const fixedHdr = uint16(unsafe.Offsetof(syscall.Dirent{}.Name)) + nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0])) + const nameBufLen = uint16(len(nameBuf)) + limit := dirent.Reclen - fixedHdr + if limit > nameBufLen { + limit = nameBufLen + } + nameLen := bytes.IndexByte(nameBuf[:limit], 0) + if nameLen < 0 { + panic("failed to find terminating 0 byte in dirent") + } + return uint64(nameLen) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go new file mode 100644 index 0000000000..a906b87595 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go @@ -0,0 +1,37 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine !linux,!darwin,!freebsd,!openbsd,!netbsd + +package fastwalk + +import ( + "io/ioutil" + "os" +) + +// readDir calls fn for each directory entry in dirName. +// It does not descend into directories or follow symlinks. +// If fn returns a non-nil error, readDir returns with that error +// immediately. 
+func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error { + fis, err := ioutil.ReadDir(dirName) + if err != nil { + return err + } + skipFiles := false + for _, fi := range fis { + if fi.Mode().IsRegular() && skipFiles { + continue + } + if err := fn(dirName, fi.Name(), fi.Mode()&os.ModeType); err != nil { + if err == SkipFiles { + skipFiles = true + continue + } + return err + } + } + return nil +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go new file mode 100644 index 0000000000..3369b1a0b2 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go @@ -0,0 +1,127 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin freebsd openbsd netbsd +// +build !appengine + +package fastwalk + +import ( + "fmt" + "os" + "syscall" + "unsafe" +) + +const blockSize = 8 << 10 + +// unknownFileMode is a sentinel (and bogus) os.FileMode +// value used to represent a syscall.DT_UNKNOWN Dirent.Type. +const unknownFileMode os.FileMode = os.ModeNamedPipe | os.ModeSocket | os.ModeDevice + +func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error { + fd, err := syscall.Open(dirName, 0, 0) + if err != nil { + return &os.PathError{Op: "open", Path: dirName, Err: err} + } + defer syscall.Close(fd) + + // The buffer must be at least a block long. 
+ buf := make([]byte, blockSize) // stack-allocated; doesn't escape + bufp := 0 // starting read position in buf + nbuf := 0 // end valid data in buf + skipFiles := false + for { + if bufp >= nbuf { + bufp = 0 + nbuf, err = syscall.ReadDirent(fd, buf) + if err != nil { + return os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + return nil + } + } + consumed, name, typ := parseDirEnt(buf[bufp:nbuf]) + bufp += consumed + if name == "" || name == "." || name == ".." { + continue + } + // Fallback for filesystems (like old XFS) that don't + // support Dirent.Type and have DT_UNKNOWN (0) there + // instead. + if typ == unknownFileMode { + fi, err := os.Lstat(dirName + "/" + name) + if err != nil { + // It got deleted in the meantime. + if os.IsNotExist(err) { + continue + } + return err + } + typ = fi.Mode() & os.ModeType + } + if skipFiles && typ.IsRegular() { + continue + } + if err := fn(dirName, name, typ); err != nil { + if err == SkipFiles { + skipFiles = true + continue + } + return err + } + } +} + +func parseDirEnt(buf []byte) (consumed int, name string, typ os.FileMode) { + // golang.org/issue/15653 + dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0])) + if v := unsafe.Offsetof(dirent.Reclen) + unsafe.Sizeof(dirent.Reclen); uintptr(len(buf)) < v { + panic(fmt.Sprintf("buf size of %d smaller than dirent header size %d", len(buf), v)) + } + if len(buf) < int(dirent.Reclen) { + panic(fmt.Sprintf("buf size %d < record length %d", len(buf), dirent.Reclen)) + } + consumed = int(dirent.Reclen) + if direntInode(dirent) == 0 { // File absent in directory. + return + } + switch dirent.Type { + case syscall.DT_REG: + typ = 0 + case syscall.DT_DIR: + typ = os.ModeDir + case syscall.DT_LNK: + typ = os.ModeSymlink + case syscall.DT_BLK: + typ = os.ModeDevice + case syscall.DT_FIFO: + typ = os.ModeNamedPipe + case syscall.DT_SOCK: + typ = os.ModeSocket + case syscall.DT_UNKNOWN: + typ = unknownFileMode + default: + // Skip weird things. 
+ // It's probably a DT_WHT (http://lwn.net/Articles/325369/) + // or something. Revisit if/when this package is moved outside + // of goimports. goimports only cares about regular files, + // symlinks, and directories. + return + } + + nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0])) + nameLen := direntNamlen(dirent) + + // Special cases for common things: + if nameLen == 1 && nameBuf[0] == '.' { + name = "." + } else if nameLen == 2 && nameBuf[0] == '.' && nameBuf[1] == '.' { + name = ".." + } else { + name = string(nameBuf[:nameLen]) + } + return +} diff --git a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go new file mode 100644 index 0000000000..04bb96a362 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go @@ -0,0 +1,250 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gopathwalk is like filepath.Walk but specialized for finding Go +// packages, particularly in $GOPATH and $GOROOT. +package gopathwalk + +import ( + "bufio" + "bytes" + "fmt" + "go/build" + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" + + "golang.org/x/tools/internal/fastwalk" +) + +// Options controls the behavior of a Walk call. +type Options struct { + Debug bool // Enable debug logging + ModulesEnabled bool // Search module caches. Also disables legacy goimports ignore rules. +} + +// RootType indicates the type of a Root. +type RootType int + +const ( + RootUnknown RootType = iota + RootGOROOT + RootGOPATH + RootCurrentModule + RootModuleCache + RootOther +) + +// A Root is a starting point for a Walk. +type Root struct { + Path string + Type RootType +} + +// SrcDirsRoots returns the roots from build.Default.SrcDirs(). Not modules-compatible. 
+func SrcDirsRoots(ctx *build.Context) []Root { + var roots []Root + roots = append(roots, Root{filepath.Join(ctx.GOROOT, "src"), RootGOROOT}) + for _, p := range filepath.SplitList(ctx.GOPATH) { + roots = append(roots, Root{filepath.Join(p, "src"), RootGOPATH}) + } + return roots +} + +// Walk walks Go source directories ($GOROOT, $GOPATH, etc) to find packages. +// For each package found, add will be called (concurrently) with the absolute +// paths of the containing source directory and the package directory. +// add will be called concurrently. +func Walk(roots []Root, add func(root Root, dir string), opts Options) { + for _, root := range roots { + walkDir(root, add, opts) + } +} + +func walkDir(root Root, add func(Root, string), opts Options) { + if _, err := os.Stat(root.Path); os.IsNotExist(err) { + if opts.Debug { + log.Printf("skipping nonexistant directory: %v", root.Path) + } + return + } + if opts.Debug { + log.Printf("scanning %s", root.Path) + } + w := &walker{ + root: root, + add: add, + opts: opts, + } + w.init() + if err := fastwalk.Walk(root.Path, w.walk); err != nil { + log.Printf("gopathwalk: scanning directory %v: %v", root.Path, err) + } + + if opts.Debug { + log.Printf("scanned %s", root.Path) + } +} + +// walker is the callback for fastwalk.Walk. +type walker struct { + root Root // The source directory to scan. + add func(Root, string) // The callback that will be invoked for every possible Go package dir. + opts Options // Options passed to Walk by the user. + + ignoredDirs []os.FileInfo // The ignored directories, loaded from .goimportsignore files. +} + +// init initializes the walker based on its Options. 
+func (w *walker) init() { + var ignoredPaths []string + if w.root.Type == RootModuleCache { + ignoredPaths = []string{"cache"} + } + if !w.opts.ModulesEnabled && w.root.Type == RootGOPATH { + ignoredPaths = w.getIgnoredDirs(w.root.Path) + ignoredPaths = append(ignoredPaths, "v", "mod") + } + + for _, p := range ignoredPaths { + full := filepath.Join(w.root.Path, p) + if fi, err := os.Stat(full); err == nil { + w.ignoredDirs = append(w.ignoredDirs, fi) + if w.opts.Debug { + log.Printf("Directory added to ignore list: %s", full) + } + } else if w.opts.Debug { + log.Printf("Error statting ignored directory: %v", err) + } + } +} + +// getIgnoredDirs reads an optional config file at /.goimportsignore +// of relative directories to ignore when scanning for go files. +// The provided path is one of the $GOPATH entries with "src" appended. +func (w *walker) getIgnoredDirs(path string) []string { + file := filepath.Join(path, ".goimportsignore") + slurp, err := ioutil.ReadFile(file) + if w.opts.Debug { + if err != nil { + log.Print(err) + } else { + log.Printf("Read %s", file) + } + } + if err != nil { + return nil + } + + var ignoredDirs []string + bs := bufio.NewScanner(bytes.NewReader(slurp)) + for bs.Scan() { + line := strings.TrimSpace(bs.Text()) + if line == "" || strings.HasPrefix(line, "#") { + continue + } + ignoredDirs = append(ignoredDirs, line) + } + return ignoredDirs +} + +func (w *walker) shouldSkipDir(fi os.FileInfo) bool { + for _, ignoredDir := range w.ignoredDirs { + if os.SameFile(fi, ignoredDir) { + return true + } + } + return false +} + +func (w *walker) walk(path string, typ os.FileMode) error { + dir := filepath.Dir(path) + if typ.IsRegular() { + if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) { + // Doesn't make sense to have regular files + // directly in your $GOPATH/src or $GOROOT/src. 
+ return fastwalk.SkipFiles + } + if !strings.HasSuffix(path, ".go") { + return nil + } + + w.add(w.root, dir) + return fastwalk.SkipFiles + } + if typ == os.ModeDir { + base := filepath.Base(path) + if base == "" || base[0] == '.' || base[0] == '_' || + base == "testdata" || + (w.root.Type == RootGOROOT && w.opts.ModulesEnabled && base == "vendor") || + (!w.opts.ModulesEnabled && base == "node_modules") { + return filepath.SkipDir + } + fi, err := os.Lstat(path) + if err == nil && w.shouldSkipDir(fi) { + return filepath.SkipDir + } + return nil + } + if typ == os.ModeSymlink { + base := filepath.Base(path) + if strings.HasPrefix(base, ".#") { + // Emacs noise. + return nil + } + fi, err := os.Lstat(path) + if err != nil { + // Just ignore it. + return nil + } + if w.shouldTraverse(dir, fi) { + return fastwalk.TraverseLink + } + } + return nil +} + +// shouldTraverse reports whether the symlink fi, found in dir, +// should be followed. It makes sure symlinks were never visited +// before to avoid symlink loops. +func (w *walker) shouldTraverse(dir string, fi os.FileInfo) bool { + path := filepath.Join(dir, fi.Name()) + target, err := filepath.EvalSymlinks(path) + if err != nil { + return false + } + ts, err := os.Stat(target) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return false + } + if !ts.IsDir() { + return false + } + if w.shouldSkipDir(ts) { + return false + } + // Check for symlink loops by statting each directory component + // and seeing if any are the same file as ts. + for { + parent := filepath.Dir(path) + if parent == path { + // Made it to the root without seeing a cycle. + // Use this symlink. + return true + } + parentInfo, err := os.Stat(parent) + if err != nil { + return false + } + if os.SameFile(ts, parentInfo) { + // Cycle. Don't traverse. 
+ return false + } + path = parent + } + +} diff --git a/vendor/golang.org/x/tools/internal/semver/semver.go b/vendor/golang.org/x/tools/internal/semver/semver.go new file mode 100644 index 0000000000..4af7118e55 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/semver/semver.go @@ -0,0 +1,388 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semver implements comparison of semantic version strings. +// In this package, semantic version strings must begin with a leading "v", +// as in "v1.0.0". +// +// The general form of a semantic version string accepted by this package is +// +// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]] +// +// where square brackets indicate optional parts of the syntax; +// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros; +// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers +// using only alphanumeric characters and hyphens; and +// all-numeric PRERELEASE identifiers must not have leading zeros. +// +// This package follows Semantic Versioning 2.0.0 (see semver.org) +// with two exceptions. First, it requires the "v" prefix. Second, it recognizes +// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes) +// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0. +package semver + +// parsed returns the parsed form of a semantic version string. +type parsed struct { + major string + minor string + patch string + short string + prerelease string + build string + err string +} + +// IsValid reports whether v is a valid semantic version string. +func IsValid(v string) bool { + _, ok := parse(v) + return ok +} + +// Canonical returns the canonical formatting of the semantic version v. +// It fills in any missing .MINOR or .PATCH and discards build metadata. +// Two semantic versions compare equal only if their canonical formattings +// are identical strings. 
+// The canonical invalid semantic version is the empty string. +func Canonical(v string) string { + p, ok := parse(v) + if !ok { + return "" + } + if p.build != "" { + return v[:len(v)-len(p.build)] + } + if p.short != "" { + return v + p.short + } + return v +} + +// Major returns the major version prefix of the semantic version v. +// For example, Major("v2.1.0") == "v2". +// If v is an invalid semantic version string, Major returns the empty string. +func Major(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return v[:1+len(pv.major)] +} + +// MajorMinor returns the major.minor version prefix of the semantic version v. +// For example, MajorMinor("v2.1.0") == "v2.1". +// If v is an invalid semantic version string, MajorMinor returns the empty string. +func MajorMinor(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + i := 1 + len(pv.major) + if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor { + return v[:j] + } + return v[:i] + "." + pv.minor +} + +// Prerelease returns the prerelease suffix of the semantic version v. +// For example, Prerelease("v2.1.0-pre+meta") == "-pre". +// If v is an invalid semantic version string, Prerelease returns the empty string. +func Prerelease(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return pv.prerelease +} + +// Build returns the build suffix of the semantic version v. +// For example, Build("v2.1.0+meta") == "+meta". +// If v is an invalid semantic version string, Build returns the empty string. +func Build(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return pv.build +} + +// Compare returns an integer comparing two versions according to +// according to semantic version precedence. +// The result will be 0 if v == w, -1 if v < w, or +1 if v > w. +// +// An invalid semantic version string is considered less than a valid one. +// All invalid semantic version strings compare equal to each other. 
+func Compare(v, w string) int { + pv, ok1 := parse(v) + pw, ok2 := parse(w) + if !ok1 && !ok2 { + return 0 + } + if !ok1 { + return -1 + } + if !ok2 { + return +1 + } + if c := compareInt(pv.major, pw.major); c != 0 { + return c + } + if c := compareInt(pv.minor, pw.minor); c != 0 { + return c + } + if c := compareInt(pv.patch, pw.patch); c != 0 { + return c + } + return comparePrerelease(pv.prerelease, pw.prerelease) +} + +// Max canonicalizes its arguments and then returns the version string +// that compares greater. +func Max(v, w string) string { + v = Canonical(v) + w = Canonical(w) + if Compare(v, w) > 0 { + return v + } + return w +} + +func parse(v string) (p parsed, ok bool) { + if v == "" || v[0] != 'v' { + p.err = "missing v prefix" + return + } + p.major, v, ok = parseInt(v[1:]) + if !ok { + p.err = "bad major version" + return + } + if v == "" { + p.minor = "0" + p.patch = "0" + p.short = ".0.0" + return + } + if v[0] != '.' { + p.err = "bad minor prefix" + ok = false + return + } + p.minor, v, ok = parseInt(v[1:]) + if !ok { + p.err = "bad minor version" + return + } + if v == "" { + p.patch = "0" + p.short = ".0" + return + } + if v[0] != '.' 
{ + p.err = "bad patch prefix" + ok = false + return + } + p.patch, v, ok = parseInt(v[1:]) + if !ok { + p.err = "bad patch version" + return + } + if len(v) > 0 && v[0] == '-' { + p.prerelease, v, ok = parsePrerelease(v) + if !ok { + p.err = "bad prerelease" + return + } + } + if len(v) > 0 && v[0] == '+' { + p.build, v, ok = parseBuild(v) + if !ok { + p.err = "bad build" + return + } + } + if v != "" { + p.err = "junk on end" + ok = false + return + } + ok = true + return +} + +func parseInt(v string) (t, rest string, ok bool) { + if v == "" { + return + } + if v[0] < '0' || '9' < v[0] { + return + } + i := 1 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + if v[0] == '0' && i != 1 { + return + } + return v[:i], v[i:], true +} + +func parsePrerelease(v string) (t, rest string, ok bool) { + // "A pre-release version MAY be denoted by appending a hyphen and + // a series of dot separated identifiers immediately following the patch version. + // Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-]. + // Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes." + if v == "" || v[0] != '-' { + return + } + i := 1 + start := 1 + for i < len(v) && v[i] != '+' { + if !isIdentChar(v[i]) && v[i] != '.' { + return + } + if v[i] == '.' { + if start == i || isBadNum(v[start:i]) { + return + } + start = i + 1 + } + i++ + } + if start == i || isBadNum(v[start:i]) { + return + } + return v[:i], v[i:], true +} + +func parseBuild(v string) (t, rest string, ok bool) { + if v == "" || v[0] != '+' { + return + } + i := 1 + start := 1 + for i < len(v) { + if !isIdentChar(v[i]) { + return + } + if v[i] == '.' 
{ + if start == i { + return + } + start = i + 1 + } + i++ + } + if start == i { + return + } + return v[:i], v[i:], true +} + +func isIdentChar(c byte) bool { + return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-' +} + +func isBadNum(v string) bool { + i := 0 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + return i == len(v) && i > 1 && v[0] == '0' +} + +func isNum(v string) bool { + i := 0 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + return i == len(v) +} + +func compareInt(x, y string) int { + if x == y { + return 0 + } + if len(x) < len(y) { + return -1 + } + if len(x) > len(y) { + return +1 + } + if x < y { + return -1 + } else { + return +1 + } +} + +func comparePrerelease(x, y string) int { + // "When major, minor, and patch are equal, a pre-release version has + // lower precedence than a normal version. + // Example: 1.0.0-alpha < 1.0.0. + // Precedence for two pre-release versions with the same major, minor, + // and patch version MUST be determined by comparing each dot separated + // identifier from left to right until a difference is found as follows: + // identifiers consisting of only digits are compared numerically and + // identifiers with letters or hyphens are compared lexically in ASCII + // sort order. Numeric identifiers always have lower precedence than + // non-numeric identifiers. A larger set of pre-release fields has a + // higher precedence than a smaller set, if all of the preceding + // identifiers are equal. + // Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta < + // 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0." + if x == y { + return 0 + } + if x == "" { + return +1 + } + if y == "" { + return -1 + } + for x != "" && y != "" { + x = x[1:] // skip - or . + y = y[1:] // skip - or . 
+ var dx, dy string + dx, x = nextIdent(x) + dy, y = nextIdent(y) + if dx != dy { + ix := isNum(dx) + iy := isNum(dy) + if ix != iy { + if ix { + return -1 + } else { + return +1 + } + } + if ix { + if len(dx) < len(dy) { + return -1 + } + if len(dx) > len(dy) { + return +1 + } + } + if dx < dy { + return -1 + } else { + return +1 + } + } + } + if x == "" { + return -1 + } else { + return +1 + } +} + +func nextIdent(x string) (dx, rest string) { + i := 0 + for i < len(x) && x[i] != '.' { + i++ + } + return x[:i], x[i:] +} diff --git a/vendor/modules.txt b/vendor/modules.txt index bfcff35ba2..aeaee65e16 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -299,6 +299,14 @@ golang.org/x/text/encoding/unicode golang.org/x/text/internal/tag golang.org/x/text/internal/utf8internal golang.org/x/text/runes +# golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b +golang.org/x/tools/go/packages +golang.org/x/tools/go/gcexportdata +golang.org/x/tools/go/internal/packagesdriver +golang.org/x/tools/internal/gopathwalk +golang.org/x/tools/internal/semver +golang.org/x/tools/go/internal/gcimporter +golang.org/x/tools/internal/fastwalk # google.golang.org/appengine v1.6.1 google.golang.org/appengine/cloudsql # google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610 From 39036ffb304c722c03e042382139288c785048bb Mon Sep 17 00:00:00 2001 From: Nikhil Ranjan Date: Tue, 17 Sep 2019 12:20:23 +0200 Subject: [PATCH 44/53] =?UTF-8?q?Converting=20to=20structured=20logging=20?= =?UTF-8?q?the=20file=20app/cluster=5Fdiscover=E2=80=A6=20(#12204)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/cluster_discovery.go | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/app/cluster_discovery.go b/app/cluster_discovery.go index a8761f1a23..cbda90f9d0 100644 --- a/app/cluster_discovery.go +++ b/app/cluster_discovery.go @@ -4,7 +4,6 @@ package app import ( - "fmt" "time" 
"github.com/mattermost/mattermost-server/mlog" @@ -34,41 +33,41 @@ func (a *App) NewClusterDiscoveryService() *ClusterDiscoveryService { func (me *ClusterDiscoveryService) Start() { err := me.app.Srv.Store.ClusterDiscovery().Cleanup() if err != nil { - mlog.Error(fmt.Sprintf("ClusterDiscoveryService failed to cleanup the outdated cluster discovery information err=%v", err)) + mlog.Error("ClusterDiscoveryService failed to cleanup the outdated cluster discovery information", mlog.Err(err)) } exists, err := me.app.Srv.Store.ClusterDiscovery().Exists(&me.ClusterDiscovery) if err != nil { - mlog.Error(fmt.Sprintf("ClusterDiscoveryService failed to check if row exists for %v with err=%v", me.ClusterDiscovery.ToJson(), err)) + mlog.Error("ClusterDiscoveryService failed to check if row exists", mlog.String("ClusterDiscovery", me.ClusterDiscovery.ToJson()), mlog.Err(err)) } else { if exists { if _, err := me.app.Srv.Store.ClusterDiscovery().Delete(&me.ClusterDiscovery); err != nil { - mlog.Error(fmt.Sprintf("ClusterDiscoveryService failed to start clean for %v with err=%v", me.ClusterDiscovery.ToJson(), err)) + mlog.Error("ClusterDiscoveryService failed to start clean", mlog.String("ClusterDiscovery", me.ClusterDiscovery.ToJson()), mlog.Err(err)) } } } if err := me.app.Srv.Store.ClusterDiscovery().Save(&me.ClusterDiscovery); err != nil { - mlog.Error(fmt.Sprintf("ClusterDiscoveryService failed to save for %v with err=%v", me.ClusterDiscovery.ToJson(), err)) + mlog.Error("ClusterDiscoveryService failed to save", mlog.String("ClusterDiscovery", me.ClusterDiscovery.ToJson()), mlog.Err(err)) return } go func() { - mlog.Debug(fmt.Sprintf("ClusterDiscoveryService ping writer started for %v", me.ClusterDiscovery.ToJson())) + mlog.Debug("ClusterDiscoveryService ping writer started", mlog.String("ClusterDiscovery", me.ClusterDiscovery.ToJson())) ticker := time.NewTicker(DISCOVERY_SERVICE_WRITE_PING) defer func() { ticker.Stop() if _, err := 
me.app.Srv.Store.ClusterDiscovery().Delete(&me.ClusterDiscovery); err != nil { - mlog.Error(fmt.Sprintf("ClusterDiscoveryService failed to cleanup for %v with err=%v", me.ClusterDiscovery.ToJson(), err)) + mlog.Error("ClusterDiscoveryService failed to cleanup", mlog.String("ClusterDiscovery", me.ClusterDiscovery.ToJson()), mlog.Err(err)) } - mlog.Debug(fmt.Sprintf("ClusterDiscoveryService ping writer stopped for %v", me.ClusterDiscovery.ToJson())) + mlog.Debug("ClusterDiscoveryService ping writer stopped", mlog.String("ClusterDiscovery", me.ClusterDiscovery.ToJson())) }() for { select { case <-ticker.C: if err := me.app.Srv.Store.ClusterDiscovery().SetLastPingAt(&me.ClusterDiscovery); err != nil { - mlog.Error(fmt.Sprintf("ClusterDiscoveryService failed to write ping for %v with err=%v", me.ClusterDiscovery.ToJson(), err)) + mlog.Error("ClusterDiscoveryService failed to write ping", mlog.String("ClusterDiscovery", me.ClusterDiscovery.ToJson()), mlog.Err(err)) } case <-me.stop: return From b3517eaf2f7ce51898dcdf9130d75197cd0c1fa9 Mon Sep 17 00:00:00 2001 From: Eli Yukelzon Date: Tue, 17 Sep 2019 14:37:10 +0100 Subject: [PATCH 45/53] MM-17468 - Improving performance of fetching threads (#11980) fetchThreads parameter support in the API --- api4/post.go | 24 ++- app/auto_posts.go | 6 + app/channel_test.go | 2 +- app/command_loadtest.go | 35 +++++ app/file.go | 2 +- app/plugin_api.go | 8 +- app/post.go | 39 +++-- model/post.go | 18 ++- store/sqlstore/post_store.go | 199 ++++++++++++++----------- store/store.go | 10 +- store/storetest/mocks/PostStore.go | 70 ++++----- store/storetest/post_store.go | 228 +++++++++++++++++++++-------- store/storetest/reaction_store.go | 14 +- store/timer_layer.go | 37 +++-- 14 files changed, 457 insertions(+), 235 deletions(-) diff --git a/api4/post.go b/api4/post.go index 6d314350e9..2cb20ec040 100644 --- a/api4/post.go +++ b/api4/post.go @@ -133,6 +133,10 @@ func getPostsForChannel(c *Context, w http.ResponseWriter, r *http.Request) { 
return } } + skipFetchThreads := false + if r.URL.Query().Get("fetchThreads") == "false" { + skipFetchThreads = true + } channelId := c.Params.ChannelId page := c.Params.Page @@ -148,7 +152,7 @@ func getPostsForChannel(c *Context, w http.ResponseWriter, r *http.Request) { etag := "" if since > 0 { - list, err = c.App.GetPostsSince(channelId, since) + list, err = c.App.GetPostsSince(model.GetPostsSinceOptions{ChannelId: channelId, Time: since, SkipFetchThreads: skipFetchThreads}) } else if len(afterPost) > 0 { etag = c.App.GetPostsEtag(channelId) @@ -156,7 +160,7 @@ func getPostsForChannel(c *Context, w http.ResponseWriter, r *http.Request) { return } - list, err = c.App.GetPostsAfterPost(channelId, afterPost, page, perPage) + list, err = c.App.GetPostsAfterPost(model.GetPostsOptions{ChannelId: channelId, PostId: afterPost, Page: page, PerPage: perPage, SkipFetchThreads: skipFetchThreads}) } else if len(beforePost) > 0 { etag = c.App.GetPostsEtag(channelId) @@ -164,7 +168,7 @@ func getPostsForChannel(c *Context, w http.ResponseWriter, r *http.Request) { return } - list, err = c.App.GetPostsBeforePost(channelId, beforePost, page, perPage) + list, err = c.App.GetPostsBeforePost(model.GetPostsOptions{ChannelId: channelId, PostId: beforePost, Page: page, PerPage: perPage, SkipFetchThreads: skipFetchThreads}) } else { etag = c.App.GetPostsEtag(channelId) @@ -172,7 +176,7 @@ func getPostsForChannel(c *Context, w http.ResponseWriter, r *http.Request) { return } - list, err = c.App.GetPostsPage(channelId, page, perPage) + list, err = c.App.GetPostsPage(model.GetPostsOptions{ChannelId: channelId, Page: page, PerPage: perPage, SkipFetchThreads: skipFetchThreads}) } if err != nil { @@ -208,7 +212,11 @@ func getPostsForChannelAroundLastUnread(c *Context, w http.ResponseWriter, r *ht return } - postList, err := c.App.GetPostsForChannelAroundLastUnread(channelId, userId, c.Params.LimitBefore, c.Params.LimitAfter) + skipFetchThreads := false + if r.URL.Query().Get("fetchThreads") 
== "false" { + skipFetchThreads = true + } + postList, err := c.App.GetPostsForChannelAroundLastUnread(channelId, userId, c.Params.LimitBefore, c.Params.LimitAfter, skipFetchThreads) if err != nil { c.Err = err return @@ -222,7 +230,11 @@ func getPostsForChannelAroundLastUnread(c *Context, w http.ResponseWriter, r *ht return } - postList, err = c.App.GetPostsPage(channelId, app.PAGE_DEFAULT, c.Params.LimitBefore) + postList, err = c.App.GetPostsPage(model.GetPostsOptions{ChannelId: channelId, Page: app.PAGE_DEFAULT, PerPage: c.Params.LimitBefore, SkipFetchThreads: skipFetchThreads}) + if err != nil { + c.Err = err + return + } } postList.NextPostId = c.App.GetNextPostIdFromPostList(postList) diff --git a/app/auto_posts.go b/app/auto_posts.go index 0eda016678..b8837b4c9f 100644 --- a/app/auto_posts.go +++ b/app/auto_posts.go @@ -66,6 +66,10 @@ func (cfg *AutoPostCreator) UploadTestFile() ([]string, bool) { } func (cfg *AutoPostCreator) CreateRandomPost() (*model.Post, bool) { + return cfg.CreateRandomPostNested("", "") +} + +func (cfg *AutoPostCreator) CreateRandomPostNested(parentId, rootId string) (*model.Post, bool) { var fileIds []string if cfg.HasImage { var err1 bool @@ -84,6 +88,8 @@ func (cfg *AutoPostCreator) CreateRandomPost() (*model.Post, bool) { post := &model.Post{ ChannelId: cfg.channelid, + ParentId: parentId, + RootId: rootId, Message: postText, FileIds: fileIds} rpost, err2 := cfg.client.CreatePost(post) diff --git a/app/channel_test.go b/app/channel_test.go index bc2cd33b37..2e3eb40ff3 100644 --- a/app/channel_test.go +++ b/app/channel_test.go @@ -526,7 +526,7 @@ func TestAddChannelMemberNoUserRequestor(t *testing.T) { } assert.Equal(t, groupUserIds, channelMemberHistoryUserIds) - postList, err := th.App.Srv.Store.Post().GetPosts(channel.Id, 0, 1, false) + postList, err := th.App.Srv.Store.Post().GetPosts(model.GetPostsOptions{ChannelId: channel.Id, Page: 0, PerPage: 1}, false) require.Nil(t, err) if assert.Len(t, postList.Order, 1) { diff --git 
a/app/command_loadtest.go b/app/command_loadtest.go index 455d94ed74..01f5a1b27c 100644 --- a/app/command_loadtest.go +++ b/app/command_loadtest.go @@ -39,6 +39,9 @@ var usage = `Mattermost testing commands to help configure the system Example: /test channels fuzz 5 10 + ThreadedPost - create a large threaded post + /test threaded_post + Posts - Add some random posts with fuzz text to current channel. /test posts [fuzz] @@ -127,6 +130,10 @@ func (me *LoadTestProvider) DoCommand(a *App, args *model.CommandArgs, message s return me.PostCommand(a, args, message) } + if strings.HasPrefix(message, "threaded_post") { + return me.ThreadedPostCommand(a, args, message) + } + if strings.HasPrefix(message, "url") { return me.UrlCommand(a, args, message) } @@ -277,6 +284,34 @@ func (me *LoadTestProvider) ChannelsCommand(a *App, args *model.CommandArgs, mes return &model.CommandResponse{Text: "Added channels", ResponseType: model.COMMAND_RESPONSE_TYPE_EPHEMERAL} } +func (me *LoadTestProvider) ThreadedPostCommand(a *App, args *model.CommandArgs, message string) *model.CommandResponse { + var usernames []string + options := &model.UserGetOptions{InTeamId: args.TeamId, Page: 0, PerPage: 1000} + if profileUsers, err := a.Srv.Store.User().GetProfiles(options); err == nil { + usernames = make([]string, len(profileUsers)) + i := 0 + for _, userprof := range profileUsers { + usernames[i] = userprof.Username + i++ + } + } + + client := model.NewAPIv4Client(args.SiteURL) + client.MockSession(args.Session.Token) + testPoster := NewAutoPostCreator(client, args.ChannelId) + testPoster.Fuzzy = true + testPoster.Users = usernames + rpost, ok := testPoster.CreateRandomPost() + if !ok { + return &model.CommandResponse{Text: "Cannot create a post", ResponseType: model.COMMAND_RESPONSE_TYPE_EPHEMERAL} + } + for i := 0; i < 1000; i++ { + testPoster.CreateRandomPostNested(rpost.Id, rpost.Id) + } + + return &model.CommandResponse{Text: "Added threaded post", ResponseType: 
model.COMMAND_RESPONSE_TYPE_EPHEMERAL} +} + func (me *LoadTestProvider) PostsCommand(a *App, args *model.CommandArgs, message string) *model.CommandResponse { cmd := strings.TrimSpace(strings.TrimPrefix(message, "posts")) diff --git a/app/file.go b/app/file.go index 3ae1641560..2d9b71f5b2 100644 --- a/app/file.go +++ b/app/file.go @@ -279,7 +279,7 @@ func (a *App) MigrateFilenamesToFileInfos(post *model.Post) []*model.FileInfo { fileMigrationLock.Lock() defer fileMigrationLock.Unlock() - result, err := a.Srv.Store.Post().Get(post.Id) + result, err := a.Srv.Store.Post().Get(post.Id, false) if err != nil { mlog.Error(fmt.Sprintf("Unable to get post when migrating post to use FileInfos, err=%v", err), mlog.String("post_id", post.Id)) return []*model.FileInfo{} diff --git a/app/plugin_api.go b/app/plugin_api.go index 05ca69ab8b..867e2f587d 100644 --- a/app/plugin_api.go +++ b/app/plugin_api.go @@ -474,19 +474,19 @@ func (api *PluginAPI) GetPost(postId string) (*model.Post, *model.AppError) { } func (api *PluginAPI) GetPostsSince(channelId string, time int64) (*model.PostList, *model.AppError) { - return api.app.GetPostsSince(channelId, time) + return api.app.GetPostsSince(model.GetPostsSinceOptions{ChannelId: channelId, Time: time}) } func (api *PluginAPI) GetPostsAfter(channelId, postId string, page, perPage int) (*model.PostList, *model.AppError) { - return api.app.GetPostsAfterPost(channelId, postId, page, perPage) + return api.app.GetPostsAfterPost(model.GetPostsOptions{ChannelId: channelId, PostId: postId, Page: page, PerPage: perPage}) } func (api *PluginAPI) GetPostsBefore(channelId, postId string, page, perPage int) (*model.PostList, *model.AppError) { - return api.app.GetPostsBeforePost(channelId, postId, page, perPage) + return api.app.GetPostsBeforePost(model.GetPostsOptions{ChannelId: channelId, PostId: postId, Page: page, PerPage: perPage}) } func (api *PluginAPI) GetPostsForChannel(channelId string, page, perPage int) (*model.PostList, *model.AppError) { 
- return api.app.GetPostsPage(channelId, page, perPage) + return api.app.GetPostsPage(model.GetPostsOptions{ChannelId: channelId, Page: perPage, PerPage: page}) } func (api *PluginAPI) UpdatePost(post *model.Post) (*model.Post, *model.AppError) { diff --git a/app/post.go b/app/post.go index b54de9e1cc..d6f5e1d6a8 100644 --- a/app/post.go +++ b/app/post.go @@ -167,7 +167,7 @@ func (a *App) CreatePost(post *model.Post, channel *model.Channel, triggerWebhoo if len(post.RootId) > 0 { pchan = make(chan store.StoreResult, 1) go func() { - r, pErr := a.Srv.Store.Post().Get(post.RootId) + r, pErr := a.Srv.Store.Post().Get(post.RootId, true) pchan <- store.StoreResult{Data: r, Err: pErr} close(pchan) }() @@ -475,7 +475,7 @@ func (a *App) DeleteEphemeralPost(userId, postId string) { func (a *App) UpdatePost(post *model.Post, safeUpdate bool) (*model.Post, *model.AppError) { post.SanitizeProps() - postLists, err := a.Srv.Store.Post().Get(post.Id) + postLists, err := a.Srv.Store.Post().Get(post.Id, true) if err != nil { return nil, err } @@ -614,20 +614,20 @@ func (a *App) PatchPost(postId string, patch *model.PostPatch) (*model.Post, *mo return updatedPost, nil } -func (a *App) GetPostsPage(channelId string, page int, perPage int) (*model.PostList, *model.AppError) { - return a.Srv.Store.Post().GetPosts(channelId, page*perPage, perPage, true) +func (a *App) GetPostsPage(options model.GetPostsOptions) (*model.PostList, *model.AppError) { + return a.Srv.Store.Post().GetPosts(options, false) } func (a *App) GetPosts(channelId string, offset int, limit int) (*model.PostList, *model.AppError) { - return a.Srv.Store.Post().GetPosts(channelId, offset, limit, true) + return a.Srv.Store.Post().GetPosts(model.GetPostsOptions{ChannelId: channelId, Page: offset, PerPage: limit}, true) } func (a *App) GetPostsEtag(channelId string) string { return a.Srv.Store.Post().GetEtag(channelId, true) } -func (a *App) GetPostsSince(channelId string, time int64) (*model.PostList, *model.AppError) { - 
return a.Srv.Store.Post().GetPostsSince(channelId, time, true) +func (a *App) GetPostsSince(options model.GetPostsSinceOptions) (*model.PostList, *model.AppError) { + return a.Srv.Store.Post().GetPostsSince(options, true) } func (a *App) GetSinglePost(postId string) (*model.Post, *model.AppError) { @@ -635,7 +635,7 @@ func (a *App) GetSinglePost(postId string) (*model.Post, *model.AppError) { } func (a *App) GetPostThread(postId string) (*model.PostList, *model.AppError) { - return a.Srv.Store.Post().Get(postId) + return a.Srv.Store.Post().Get(postId, false) } func (a *App) GetFlaggedPosts(userId string, offset int, limit int) (*model.PostList, *model.AppError) { @@ -651,7 +651,7 @@ func (a *App) GetFlaggedPostsForChannel(userId, channelId string, offset int, li } func (a *App) GetPermalinkPost(postId string, userId string) (*model.PostList, *model.AppError) { - list, err := a.Srv.Store.Post().Get(postId) + list, err := a.Srv.Store.Post().Get(postId, false) if err != nil { return nil, err } @@ -673,19 +673,19 @@ func (a *App) GetPermalinkPost(postId string, userId string) (*model.PostList, * return list, nil } -func (a *App) GetPostsBeforePost(channelId, postId string, page, perPage int) (*model.PostList, *model.AppError) { - return a.Srv.Store.Post().GetPostsBefore(channelId, postId, perPage, page*perPage) +func (a *App) GetPostsBeforePost(options model.GetPostsOptions) (*model.PostList, *model.AppError) { + return a.Srv.Store.Post().GetPostsBefore(options) } -func (a *App) GetPostsAfterPost(channelId, postId string, page, perPage int) (*model.PostList, *model.AppError) { - return a.Srv.Store.Post().GetPostsAfter(channelId, postId, perPage, page*perPage) +func (a *App) GetPostsAfterPost(options model.GetPostsOptions) (*model.PostList, *model.AppError) { + return a.Srv.Store.Post().GetPostsAfter(options) } -func (a *App) GetPostsAroundPost(postId, channelId string, offset, limit int, before bool) (*model.PostList, *model.AppError) { +func (a *App) 
GetPostsAroundPost(before bool, options model.GetPostsOptions) (*model.PostList, *model.AppError) { if before { - return a.Srv.Store.Post().GetPostsBefore(channelId, postId, limit, offset) + return a.Srv.Store.Post().GetPostsBefore(options) } - return a.Srv.Store.Post().GetPostsAfter(channelId, postId, limit, offset) + return a.Srv.Store.Post().GetPostsAfter(options) } func (a *App) GetPostAfterTime(channelId string, time int64) (*model.Post, *model.AppError) { @@ -773,8 +773,7 @@ func (a *App) AddCursorIdsForPostList(originalList *model.PostList, afterPost, b originalList.NextPostId = nextPostId originalList.PrevPostId = prevPostId } - -func (a *App) GetPostsForChannelAroundLastUnread(channelId, userId string, limitBefore, limitAfter int) (*model.PostList, *model.AppError) { +func (a *App) GetPostsForChannelAroundLastUnread(channelId, userId string, limitBefore, limitAfter int, skipFetchThreads bool) (*model.PostList, *model.AppError) { var member *model.ChannelMember var err *model.AppError if member, err = a.GetChannelMember(channelId, userId); err != nil { @@ -798,13 +797,13 @@ func (a *App) GetPostsForChannelAroundLastUnread(channelId, userId string, limit // channel organically, those replies will be added below. 
postList.Order = []string{lastUnreadPostId} - if postListBefore, err := a.GetPostsBeforePost(channelId, lastUnreadPostId, PAGE_DEFAULT, limitBefore); err != nil { + if postListBefore, err := a.GetPostsBeforePost(model.GetPostsOptions{ChannelId: channelId, PostId: lastUnreadPostId, Page: PAGE_DEFAULT, PerPage: limitBefore, SkipFetchThreads: skipFetchThreads}); err != nil { return nil, err } else if postListBefore != nil { postList.Extend(postListBefore) } - if postListAfter, err := a.GetPostsAfterPost(channelId, lastUnreadPostId, PAGE_DEFAULT, limitAfter-1); err != nil { + if postListAfter, err := a.GetPostsAfterPost(model.GetPostsOptions{ChannelId: channelId, PostId: lastUnreadPostId, Page: PAGE_DEFAULT, PerPage: limitAfter - 1, SkipFetchThreads: skipFetchThreads}); err != nil { return nil, err } else if postListAfter != nil { postList.Extend(postListAfter) diff --git a/model/post.go b/model/post.go index c2dad69163..93241af09e 100644 --- a/model/post.go +++ b/model/post.go @@ -73,7 +73,6 @@ type Post struct { OriginalId string `json:"original_id"` Message string `json:"message"` - // MessageSource will contain the message as submitted by the user if Message has been modified // by Mattermost for presentation (e.g if an image proxy is being used). It should be used to // populate edit boxes if present. 
@@ -88,7 +87,8 @@ type Post struct { HasReactions bool `json:"has_reactions,omitempty"` // Transient data populated before sending a post to the client - Metadata *PostMetadata `json:"metadata,omitempty" db:"-"` + ReplyCount int64 `json:"reply_count" db:"-"` + Metadata *PostMetadata `json:"metadata,omitempty" db:"-"` } type PostEphemeral struct { @@ -170,6 +170,20 @@ func (o *Post) ToUnsanitizedJson() string { return string(b) } +type GetPostsSinceOptions struct { + ChannelId string + Time int64 + SkipFetchThreads bool +} + +type GetPostsOptions struct { + ChannelId string + PostId string + Page int + PerPage int + SkipFetchThreads bool +} + func PostFromJson(data io.Reader) *Post { var o *Post json.NewDecoder(data).Decode(&o) diff --git a/store/sqlstore/post_store.go b/store/sqlstore/post_store.go index fa994a1aaa..4c8698cdbd 100644 --- a/store/sqlstore/post_store.go +++ b/store/sqlstore/post_store.go @@ -128,6 +128,12 @@ func (s *SqlPostStore) Save(post *model.Post) (*model.Post, *model.AppError) { if _, err := s.GetMaster().Exec("UPDATE Posts SET UpdateAt = :UpdateAt WHERE Id = :RootId", map[string]interface{}{"UpdateAt": time, "RootId": post.RootId}); err != nil { mlog.Error("Error updating Post UpdateAt.", mlog.Err(err)) } + } else { + if count, err := s.GetMaster().SelectInt("SELECT COUNT(*) FROM Posts WHERE RootId = :Id", map[string]interface{}{"Id": post.Id}); err != nil { + mlog.Error(fmt.Sprintf("Error fetching post's thread: %v", err.Error())) + } else { + post.ReplyCount = count + } } return post, nil @@ -266,7 +272,7 @@ func (s *SqlPostStore) GetFlaggedPostsForChannel(userId, channelId string, offse return pl, nil } -func (s *SqlPostStore) Get(id string) (*model.PostList, *model.AppError) { +func (s *SqlPostStore) Get(id string, skipFetchThreads bool) (*model.PostList, *model.AppError) { pl := model.NewPostList() if len(id) == 0 { @@ -274,35 +280,40 @@ func (s *SqlPostStore) Get(id string) (*model.PostList, *model.AppError) { } var post model.Post - 
err := s.GetReplica().SelectOne(&post, "SELECT * FROM Posts WHERE Id = :Id AND DeleteAt = 0", map[string]interface{}{"Id": id}) + var postFetchQuery string + if skipFetchThreads { + postFetchQuery = "SELECT p.*, (SELECT count(Posts.Id) FROM Posts WHERE Posts.RootId = p.RootId) FROM Posts p WHERE p.Id = :Id AND p.DeleteAt = 0" + } else { + postFetchQuery = "SELECT * FROM Posts WHERE Id = :Id AND DeleteAt = 0" + } + err := s.GetReplica().SelectOne(&post, postFetchQuery, map[string]interface{}{"Id": id}) if err != nil { return nil, model.NewAppError("SqlPostStore.GetPost", "store.sql_post.get.app_error", nil, "id="+id+err.Error(), http.StatusNotFound) } - pl.AddPost(&post) pl.AddOrder(id) + if !skipFetchThreads { + rootId := post.RootId - rootId := post.RootId + if rootId == "" { + rootId = post.Id + } - if rootId == "" { - rootId = post.Id + if len(rootId) == 0 { + return nil, model.NewAppError("SqlPostStore.GetPost", "store.sql_post.get.app_error", nil, "root_id="+rootId, http.StatusInternalServerError) + } + + var posts []*model.Post + _, err = s.GetReplica().Select(&posts, "SELECT * FROM Posts WHERE (Id = :Id OR RootId = :RootId) AND DeleteAt = 0", map[string]interface{}{"Id": rootId, "RootId": rootId}) + if err != nil { + return nil, model.NewAppError("SqlPostStore.GetPost", "store.sql_post.get.app_error", nil, "root_id="+rootId+err.Error(), http.StatusInternalServerError) + } + + for _, p := range posts { + pl.AddPost(p) + pl.AddOrder(p.Id) + } } - - if len(rootId) == 0 { - return nil, model.NewAppError("SqlPostStore.GetPost", "store.sql_post.get.app_error", nil, "root_id="+rootId, http.StatusInternalServerError) - } - - var posts []*model.Post - _, err = s.GetReplica().Select(&posts, "SELECT * FROM Posts WHERE (Id = :Id OR RootId = :RootId) AND DeleteAt = 0", map[string]interface{}{"Id": rootId, "RootId": rootId}) - if err != nil { - return nil, model.NewAppError("SqlPostStore.GetPost", "store.sql_post.get.app_error", nil, "root_id="+rootId+err.Error(), 
http.StatusInternalServerError) - } - - for _, p := range posts { - pl.AddPost(p) - pl.AddOrder(p.Id) - } - return pl, nil } @@ -445,14 +456,14 @@ func (s *SqlPostStore) PermanentDeleteByChannel(channelId string) *model.AppErro return nil } -func (s *SqlPostStore) GetPosts(channelId string, offset int, limit int, allowFromCache bool) (*model.PostList, *model.AppError) { - if limit > 1000 { - return nil, model.NewAppError("SqlPostStore.GetLinearPosts", "store.sql_post.get_posts.app_error", nil, "channelId="+channelId, http.StatusBadRequest) +func (s *SqlPostStore) GetPosts(options model.GetPostsOptions, allowFromCache bool) (*model.PostList, *model.AppError) { + if options.PerPage > 1000 { + return nil, model.NewAppError("SqlPostStore.GetLinearPosts", "store.sql_post.get_posts.app_error", nil, "channelId="+options.ChannelId, http.StatusBadRequest) } - + offset := options.PerPage * options.Page // Caching only occurs on limits of 30 and 60, the common limits requested by MM clients - if allowFromCache && offset == 0 && (limit == 60 || limit == 30) { - if cacheItem, ok := s.lastPostsCache.Get(fmt.Sprintf("%s%v", channelId, limit)); ok { + if allowFromCache && offset == 0 && (options.PerPage == 60 || options.PerPage == 30) { + if cacheItem, ok := s.lastPostsCache.Get(fmt.Sprintf("%s%v", options.ChannelId, options.PerPage)); ok { if s.metrics != nil { s.metrics.IncrementMemCacheHitCounter("Last Posts Cache") } @@ -466,13 +477,13 @@ func (s *SqlPostStore) GetPosts(channelId string, offset int, limit int, allowFr rpc := make(chan store.StoreResult, 1) go func() { - posts, err := s.getRootPosts(channelId, offset, limit) + posts, err := s.getRootPosts(options.ChannelId, offset, options.PerPage, options.SkipFetchThreads) rpc <- store.StoreResult{Data: posts, Err: err} close(rpc) }() cpc := make(chan store.StoreResult, 1) go func() { - posts, err := s.getParentsPosts(channelId, offset, limit) + posts, err := s.getParentsPosts(options.ChannelId, offset, options.PerPage, 
options.SkipFetchThreads) cpc <- store.StoreResult{Data: posts, Err: err} close(cpc) }() @@ -505,18 +516,18 @@ func (s *SqlPostStore) GetPosts(channelId string, offset int, limit int, allowFr list.MakeNonNil() // Caching only occurs on limits of 30 and 60, the common limits requested by MM clients - if offset == 0 && (limit == 60 || limit == 30) { - s.lastPostsCache.AddWithExpiresInSecs(fmt.Sprintf("%s%v", channelId, limit), list, LAST_POSTS_CACHE_SEC) + if offset == 0 && (options.PerPage == 60 || options.PerPage == 30) { + s.lastPostsCache.AddWithExpiresInSecs(fmt.Sprintf("%s%v", options.ChannelId, options.PerPage), list, LAST_POSTS_CACHE_SEC) } return list, err } -func (s *SqlPostStore) GetPostsSince(channelId string, time int64, allowFromCache bool) (*model.PostList, *model.AppError) { +func (s *SqlPostStore) GetPostsSince(options model.GetPostsSinceOptions, allowFromCache bool) (*model.PostList, *model.AppError) { if allowFromCache { // If the last post in the channel's time is less than or equal to the time we are getting posts since, // we can safely return no posts. 
- if cacheItem, ok := s.lastPostTimeCache.Get(channelId); ok && cacheItem.(int64) <= time { + if cacheItem, ok := s.lastPostTimeCache.Get(options.ChannelId); ok && cacheItem.(int64) <= options.Time { if s.metrics != nil { s.metrics.IncrementMemCacheHitCounter("Last Post Time") } @@ -556,19 +567,19 @@ func (s *SqlPostStore) GetPostsSince(channelId string, time int64, allowFromCach AND ChannelId = :ChannelId LIMIT 1000) temp_tab)) ORDER BY CreateAt DESC`, - map[string]interface{}{"ChannelId": channelId, "Time": time}) + map[string]interface{}{"ChannelId": options.ChannelId, "Time": options.Time}) if err != nil { - return nil, model.NewAppError("SqlPostStore.GetPostsSince", "store.sql_post.get_posts_since.app_error", nil, "channelId="+channelId+err.Error(), http.StatusInternalServerError) + return nil, model.NewAppError("SqlPostStore.GetPostsSince", "store.sql_post.get_posts_since.app_error", nil, "channelId="+options.ChannelId+err.Error(), http.StatusInternalServerError) } list := model.NewPostList() - latestUpdate := time + latestUpdate := options.Time for _, p := range posts { list.AddPost(p) - if p.UpdateAt > time { + if p.UpdateAt > options.Time { list.AddOrder(p.Id) } if latestUpdate < p.UpdateAt { @@ -576,21 +587,25 @@ func (s *SqlPostStore) GetPostsSince(channelId string, time int64, allowFromCach } } - s.lastPostTimeCache.AddWithExpiresInSecs(channelId, latestUpdate, LAST_POST_TIME_CACHE_SEC) + s.lastPostTimeCache.AddWithExpiresInSecs(options.ChannelId, latestUpdate, LAST_POST_TIME_CACHE_SEC) return list, nil } -func (s *SqlPostStore) GetPostsBefore(channelId string, postId string, limit int, offset int) (*model.PostList, *model.AppError) { - return s.getPostsAround(channelId, postId, limit, offset, true) +func (s *SqlPostStore) GetPostsBefore(options model.GetPostsOptions) (*model.PostList, *model.AppError) { + return s.getPostsAround(true, options) } -func (s *SqlPostStore) GetPostsAfter(channelId string, postId string, limit int, offset int) 
(*model.PostList, *model.AppError) { - return s.getPostsAround(channelId, postId, limit, offset, false) +func (s *SqlPostStore) GetPostsAfter(options model.GetPostsOptions) (*model.PostList, *model.AppError) { + return s.getPostsAround(false, options) } -func (s *SqlPostStore) getPostsAround(channelId string, postId string, limit int, offset int, before bool) (*model.PostList, *model.AppError) { - var direction, sort string +func (s *SqlPostStore) getPostsAround(before bool, options model.GetPostsOptions) (*model.PostList, *model.AppError) { + offset := options.Page * options.PerPage + var posts, parents []*model.Post + + var direction string + var sort string if before { direction = "<" sort = "DESC" @@ -598,23 +613,29 @@ func (s *SqlPostStore) getPostsAround(channelId string, postId string, limit int direction = ">" sort = "ASC" } + replyCountSubQuery := s.getQueryBuilder().Select("COUNT(Posts.Id)").From("Posts").Where(sq.Expr("p.RootId = '' AND RootId = p.Id")) + query := s.getQueryBuilder().Select("p.*") + if options.SkipFetchThreads { + query = query.Column(sq.Alias(replyCountSubQuery, "ReplyCount")) + } + query = query.From("Posts p"). + Where(sq.And{ + sq.Expr(`CreateAt `+direction+` (SELECT CreateAt FROM Posts WHERE Id = ?)`, options.PostId), + sq.Eq{"ChannelId": options.ChannelId}, + sq.Eq{"DeleteAt": int(0)}, + }). + OrderBy("CreateAt " + sort). + Limit(uint64(options.PerPage)). 
+ Offset(uint64(offset)) + + queryString, args, err := query.ToSql() - var posts, parents []*model.Post - _, err := s.GetReplica().Select(&posts, - `SELECT - * - FROM - Posts - WHERE - CreateAt `+direction+` (SELECT CreateAt FROM Posts WHERE Id = :PostId) - AND ChannelId = :ChannelId - AND DeleteAt = 0 - ORDER BY CreateAt `+sort+` - LIMIT :Limit - OFFSET :Offset`, - map[string]interface{}{"ChannelId": channelId, "PostId": postId, "Limit": limit, "Offset": offset}) if err != nil { - return nil, model.NewAppError("SqlPostStore.GetPostContext", "store.sql_post.get_posts_around.get.app_error", nil, "channelId="+channelId+err.Error(), http.StatusInternalServerError) + return nil, model.NewAppError("SqlPostStore.GetPostContext", "store.sql_post.get_posts_around.get.app_error", nil, "channelId="+options.ChannelId+err.Error(), http.StatusInternalServerError) + } + _, err = s.GetMaster().Select(&posts, queryString, args...) + if err != nil { + return nil, model.NewAppError("SqlPostStore.GetPostContext", "store.sql_post.get_posts_around.get.app_error", nil, "channelId="+options.ChannelId+err.Error(), http.StatusInternalServerError) } if len(posts) > 0 { @@ -625,28 +646,29 @@ func (s *SqlPostStore) getPostsAround(channelId string, postId string, limit int rootIds = append(rootIds, post.RootId) } } + rootQuery := s.getQueryBuilder().Select("p.*") + if options.SkipFetchThreads { + rootQuery = rootQuery.Column(sq.Alias(replyCountSubQuery, "ReplyCount")) + } + rootQuery = rootQuery.From("Posts p"). + Where(sq.And{ + sq.Or{ + sq.Eq{"RootId": rootIds}, + sq.Eq{"Id": rootIds}, + }, + sq.Eq{"ChannelId": options.ChannelId}, + sq.Eq{"DeleteAt": 0}, + }). 
+ OrderBy("CreateAt DESC") - keys, params := MapStringsToQueryParams(rootIds, "PostId") - - params["ChannelId"] = channelId - params["PostId"] = postId - params["Limit"] = limit - params["Offset"] = offset - - _, err = s.GetReplica().Select(&parents, - `SELECT - * - FROM - Posts - WHERE - (Id IN `+keys+` OR RootId IN `+keys+`) - AND ChannelId = :ChannelId - AND DeleteAt = 0 - ORDER BY CreateAt DESC`, - params) + rootQueryString, rootArgs, err := rootQuery.ToSql() if err != nil { - return nil, model.NewAppError("SqlPostStore.GetPostContext", "store.sql_post.get_posts_around.get_parent.app_error", nil, "channelId="+channelId+err.Error(), http.StatusInternalServerError) + return nil, model.NewAppError("SqlPostStore.GetPostContext", "store.sql_post.get_posts_around.get_parent.app_error", nil, "channelId="+options.ChannelId+err.Error(), http.StatusInternalServerError) + } + _, err = s.GetMaster().Select(&parents, rootQueryString, rootArgs...) + if err != nil { + return nil, model.NewAppError("SqlPostStore.GetPostContext", "store.sql_post.get_posts_around.get_parent.app_error", nil, "channelId="+options.ChannelId+err.Error(), http.StatusInternalServerError) } } @@ -745,20 +767,29 @@ func (s *SqlPostStore) GetPostAfterTime(channelId string, time int64) (*model.Po return post, nil } -func (s *SqlPostStore) getRootPosts(channelId string, offset int, limit int) ([]*model.Post, *model.AppError) { +func (s *SqlPostStore) getRootPosts(channelId string, offset int, limit int, skipFetchThreads bool) ([]*model.Post, *model.AppError) { var posts []*model.Post - _, err := s.GetReplica().Select(&posts, "SELECT * FROM Posts WHERE ChannelId = :ChannelId AND DeleteAt = 0 ORDER BY CreateAt DESC LIMIT :Limit OFFSET :Offset", map[string]interface{}{"ChannelId": channelId, "Offset": offset, "Limit": limit}) + var fetchQuery string + if skipFetchThreads { + fetchQuery = "SELECT p.*, (SELECT COUNT(Posts.Id) FROM Posts WHERE p.RootId = '' AND Posts.RootId = p.Id) as ReplyCount FROM Posts p 
WHERE ChannelId = :ChannelId AND DeleteAt = 0 ORDER BY CreateAt DESC LIMIT :Limit OFFSET :Offset" + } else { + fetchQuery = "SELECT * FROM Posts WHERE ChannelId = :ChannelId AND DeleteAt = 0 ORDER BY CreateAt DESC LIMIT :Limit OFFSET :Offset" + } + _, err := s.GetReplica().Select(&posts, fetchQuery, map[string]interface{}{"ChannelId": channelId, "Offset": offset, "Limit": limit}) if err != nil { return nil, model.NewAppError("SqlPostStore.GetLinearPosts", "store.sql_post.get_root_posts.app_error", nil, "channelId="+channelId+err.Error(), http.StatusInternalServerError) } return posts, nil } -func (s *SqlPostStore) getParentsPosts(channelId string, offset int, limit int) ([]*model.Post, *model.AppError) { +func (s *SqlPostStore) getParentsPosts(channelId string, offset int, limit int, skipFetchThreads bool) ([]*model.Post, *model.AppError) { var posts []*model.Post + replyCountQuery := "" + if skipFetchThreads { + replyCountQuery = ` ,(SELECT COUNT(Posts.Id) FROM Posts WHERE q2.RootId = '' AND Posts.RootId = q2.Id) as ReplyCount` + } _, err := s.GetReplica().Select(&posts, - `SELECT - q2.* + `SELECT q2.*`+replyCountQuery+` FROM Posts q2 INNER JOIN diff --git a/store/store.go b/store/store.go index c6fe2b1d5b..cc96278eee 100644 --- a/store/store.go +++ b/store/store.go @@ -200,18 +200,18 @@ type ChannelMemberHistoryStore interface { type PostStore interface { Save(post *model.Post) (*model.Post, *model.AppError) Update(newPost *model.Post, oldPost *model.Post) (*model.Post, *model.AppError) - Get(id string) (*model.PostList, *model.AppError) + Get(id string, skipFetchThreads bool) (*model.PostList, *model.AppError) GetSingle(id string) (*model.Post, *model.AppError) Delete(postId string, time int64, deleteByID string) *model.AppError PermanentDeleteByUser(userId string) *model.AppError PermanentDeleteByChannel(channelId string) *model.AppError - GetPosts(channelId string, offset int, limit int, allowFromCache bool) (*model.PostList, *model.AppError) + 
GetPosts(options model.GetPostsOptions, allowFromCache bool) (*model.PostList, *model.AppError) GetFlaggedPosts(userId string, offset int, limit int) (*model.PostList, *model.AppError) GetFlaggedPostsForTeam(userId, teamId string, offset int, limit int) (*model.PostList, *model.AppError) GetFlaggedPostsForChannel(userId, channelId string, offset int, limit int) (*model.PostList, *model.AppError) - GetPostsBefore(channelId string, postId string, numPosts int, offset int) (*model.PostList, *model.AppError) - GetPostsAfter(channelId string, postId string, numPosts int, offset int) (*model.PostList, *model.AppError) - GetPostsSince(channelId string, time int64, allowFromCache bool) (*model.PostList, *model.AppError) + GetPostsBefore(options model.GetPostsOptions) (*model.PostList, *model.AppError) + GetPostsAfter(options model.GetPostsOptions) (*model.PostList, *model.AppError) + GetPostsSince(options model.GetPostsSinceOptions, allowFromCache bool) (*model.PostList, *model.AppError) GetPostAfterTime(channelId string, time int64) (*model.Post, *model.AppError) GetPostIdAfterTime(channelId string, time int64) (string, *model.AppError) GetPostIdBeforeTime(channelId string, time int64) (string, *model.AppError) diff --git a/store/storetest/mocks/PostStore.go b/store/storetest/mocks/PostStore.go index acb8adfe7f..a023a9c13c 100644 --- a/store/storetest/mocks/PostStore.go +++ b/store/storetest/mocks/PostStore.go @@ -108,13 +108,13 @@ func (_m *PostStore) Delete(postId string, time int64, deleteByID string) *model return r0 } -// Get provides a mock function with given fields: id -func (_m *PostStore) Get(id string) (*model.PostList, *model.AppError) { - ret := _m.Called(id) +// Get provides a mock function with given fields: id, skipFetchThreads +func (_m *PostStore) Get(id string, skipFetchThreads bool) (*model.PostList, *model.AppError) { + ret := _m.Called(id, skipFetchThreads) var r0 *model.PostList - if rf, ok := ret.Get(0).(func(string) *model.PostList); ok { - r0 = 
rf(id) + if rf, ok := ret.Get(0).(func(string, bool) *model.PostList); ok { + r0 = rf(id, skipFetchThreads) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*model.PostList) @@ -122,8 +122,8 @@ func (_m *PostStore) Get(id string) (*model.PostList, *model.AppError) { } var r1 *model.AppError - if rf, ok := ret.Get(1).(func(string) *model.AppError); ok { - r1 = rf(id) + if rf, ok := ret.Get(1).(func(string, bool) *model.AppError); ok { + r1 = rf(id, skipFetchThreads) } else { if ret.Get(1) != nil { r1 = ret.Get(1).(*model.AppError) @@ -382,13 +382,13 @@ func (_m *PostStore) GetPostIdBeforeTime(channelId string, time int64) (string, return r0, r1 } -// GetPosts provides a mock function with given fields: channelId, offset, limit, allowFromCache -func (_m *PostStore) GetPosts(channelId string, offset int, limit int, allowFromCache bool) (*model.PostList, *model.AppError) { - ret := _m.Called(channelId, offset, limit, allowFromCache) +// GetPosts provides a mock function with given fields: options, allowFromCache +func (_m *PostStore) GetPosts(options model.GetPostsOptions, allowFromCache bool) (*model.PostList, *model.AppError) { + ret := _m.Called(options, allowFromCache) var r0 *model.PostList - if rf, ok := ret.Get(0).(func(string, int, int, bool) *model.PostList); ok { - r0 = rf(channelId, offset, limit, allowFromCache) + if rf, ok := ret.Get(0).(func(model.GetPostsOptions, bool) *model.PostList); ok { + r0 = rf(options, allowFromCache) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*model.PostList) @@ -396,8 +396,8 @@ func (_m *PostStore) GetPosts(channelId string, offset int, limit int, allowFrom } var r1 *model.AppError - if rf, ok := ret.Get(1).(func(string, int, int, bool) *model.AppError); ok { - r1 = rf(channelId, offset, limit, allowFromCache) + if rf, ok := ret.Get(1).(func(model.GetPostsOptions, bool) *model.AppError); ok { + r1 = rf(options, allowFromCache) } else { if ret.Get(1) != nil { r1 = ret.Get(1).(*model.AppError) @@ -407,13 +407,13 @@ func (_m 
*PostStore) GetPosts(channelId string, offset int, limit int, allowFrom return r0, r1 } -// GetPostsAfter provides a mock function with given fields: channelId, postId, numPosts, offset -func (_m *PostStore) GetPostsAfter(channelId string, postId string, numPosts int, offset int) (*model.PostList, *model.AppError) { - ret := _m.Called(channelId, postId, numPosts, offset) +// GetPostsAfter provides a mock function with given fields: options +func (_m *PostStore) GetPostsAfter(options model.GetPostsOptions) (*model.PostList, *model.AppError) { + ret := _m.Called(options) var r0 *model.PostList - if rf, ok := ret.Get(0).(func(string, string, int, int) *model.PostList); ok { - r0 = rf(channelId, postId, numPosts, offset) + if rf, ok := ret.Get(0).(func(model.GetPostsOptions) *model.PostList); ok { + r0 = rf(options) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*model.PostList) @@ -421,8 +421,8 @@ func (_m *PostStore) GetPostsAfter(channelId string, postId string, numPosts int } var r1 *model.AppError - if rf, ok := ret.Get(1).(func(string, string, int, int) *model.AppError); ok { - r1 = rf(channelId, postId, numPosts, offset) + if rf, ok := ret.Get(1).(func(model.GetPostsOptions) *model.AppError); ok { + r1 = rf(options) } else { if ret.Get(1) != nil { r1 = ret.Get(1).(*model.AppError) @@ -457,13 +457,13 @@ func (_m *PostStore) GetPostsBatchForIndexing(startTime int64, endTime int64, li return r0, r1 } -// GetPostsBefore provides a mock function with given fields: channelId, postId, numPosts, offset -func (_m *PostStore) GetPostsBefore(channelId string, postId string, numPosts int, offset int) (*model.PostList, *model.AppError) { - ret := _m.Called(channelId, postId, numPosts, offset) +// GetPostsBefore provides a mock function with given fields: options +func (_m *PostStore) GetPostsBefore(options model.GetPostsOptions) (*model.PostList, *model.AppError) { + ret := _m.Called(options) var r0 *model.PostList - if rf, ok := ret.Get(0).(func(string, string, int, int) 
*model.PostList); ok { - r0 = rf(channelId, postId, numPosts, offset) + if rf, ok := ret.Get(0).(func(model.GetPostsOptions) *model.PostList); ok { + r0 = rf(options) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*model.PostList) @@ -471,8 +471,8 @@ func (_m *PostStore) GetPostsBefore(channelId string, postId string, numPosts in } var r1 *model.AppError - if rf, ok := ret.Get(1).(func(string, string, int, int) *model.AppError); ok { - r1 = rf(channelId, postId, numPosts, offset) + if rf, ok := ret.Get(1).(func(model.GetPostsOptions) *model.AppError); ok { + r1 = rf(options) } else { if ret.Get(1) != nil { r1 = ret.Get(1).(*model.AppError) @@ -532,13 +532,13 @@ func (_m *PostStore) GetPostsCreatedAt(channelId string, time int64) ([]*model.P return r0, r1 } -// GetPostsSince provides a mock function with given fields: channelId, time, allowFromCache -func (_m *PostStore) GetPostsSince(channelId string, time int64, allowFromCache bool) (*model.PostList, *model.AppError) { - ret := _m.Called(channelId, time, allowFromCache) +// GetPostsSince provides a mock function with given fields: options, allowFromCache +func (_m *PostStore) GetPostsSince(options model.GetPostsSinceOptions, allowFromCache bool) (*model.PostList, *model.AppError) { + ret := _m.Called(options, allowFromCache) var r0 *model.PostList - if rf, ok := ret.Get(0).(func(string, int64, bool) *model.PostList); ok { - r0 = rf(channelId, time, allowFromCache) + if rf, ok := ret.Get(0).(func(model.GetPostsSinceOptions, bool) *model.PostList); ok { + r0 = rf(options, allowFromCache) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*model.PostList) @@ -546,8 +546,8 @@ func (_m *PostStore) GetPostsSince(channelId string, time int64, allowFromCache } var r1 *model.AppError - if rf, ok := ret.Get(1).(func(string, int64, bool) *model.AppError); ok { - r1 = rf(channelId, time, allowFromCache) + if rf, ok := ret.Get(1).(func(model.GetPostsSinceOptions, bool) *model.AppError); ok { + r1 = rf(options, allowFromCache) 
} else { if ret.Get(1) != nil { r1 = ret.Get(1).(*model.AppError) diff --git a/store/storetest/post_store.go b/store/storetest/post_store.go index ceb9d56f21..e7379941c8 100644 --- a/store/storetest/post_store.go +++ b/store/storetest/post_store.go @@ -134,7 +134,7 @@ func testPostStoreGet(t *testing.T, ss store.Store) { t.Fatal("Invalid Etag") } - r1, err := ss.Post().Get(o1.Id) + r1, err := ss.Post().Get(o1.Id, false) if err != nil { t.Fatal(err) } @@ -142,11 +142,11 @@ func testPostStoreGet(t *testing.T, ss store.Store) { t.Fatal("invalid returned post") } - if _, err = ss.Post().Get("123"); err == nil { + if _, err = ss.Post().Get("123", false); err == nil { t.Fatal("Missing id should have failed") } - if _, err = ss.Post().Get(""); err == nil { + if _, err = ss.Post().Get("", false); err == nil { t.Fatal("should fail for blank post ids") } } @@ -232,17 +232,17 @@ func testPostStoreUpdate(t *testing.T, ss store.Store) { o3, err = ss.Post().Save(o3) require.Nil(t, err) - r1, err := ss.Post().Get(o1.Id) + r1, err := ss.Post().Get(o1.Id, false) if err != nil { t.Fatal(err) } ro1 := r1.Posts[o1.Id] - r2, err := ss.Post().Get(o1.Id) + r2, err := ss.Post().Get(o1.Id, false) if err != nil { t.Fatal(err) } ro2 := r2.Posts[o2.Id] - r3, err := ss.Post().Get(o3.Id) + r3, err := ss.Post().Get(o3.Id, false) if err != nil { t.Fatal(err) } @@ -259,7 +259,7 @@ func testPostStoreUpdate(t *testing.T, ss store.Store) { t.Fatal(err) } - r1, err = ss.Post().Get(o1.Id) + r1, err = ss.Post().Get(o1.Id, false) if err != nil { t.Fatal(err) } @@ -276,7 +276,7 @@ func testPostStoreUpdate(t *testing.T, ss store.Store) { t.Fatal(err) } - r2, err = ss.Post().Get(o1.Id) + r2, err = ss.Post().Get(o1.Id, false) if err != nil { t.Fatal(err) } @@ -293,7 +293,7 @@ func testPostStoreUpdate(t *testing.T, ss store.Store) { t.Fatal(err) } - r3, err = ss.Post().Get(o3.Id) + r3, err = ss.Post().Get(o3.Id, false) if err != nil { t.Fatal(err) } @@ -311,7 +311,7 @@ func testPostStoreUpdate(t *testing.T, 
ss store.Store) { }) require.Nil(t, err) - r4, err := ss.Post().Get(o4.Id) + r4, err := ss.Post().Get(o4.Id, false) if err != nil { t.Fatal(err) } @@ -325,7 +325,7 @@ func testPostStoreUpdate(t *testing.T, ss store.Store) { t.Fatal(err) } - r4, err = ss.Post().Get(o4.Id) + r4, err = ss.Post().Get(o4.Id, false) if err != nil { t.Fatal(err) } @@ -352,7 +352,7 @@ func testPostStoreDelete(t *testing.T, ss store.Store) { o1, err := ss.Post().Save(o1) require.Nil(t, err) - if r1, err := ss.Post().Get(o1.Id); err != nil { + if r1, err := ss.Post().Get(o1.Id, false); err != nil { t.Fatal(err) } else { if r1.Posts[o1.Id].CreateAt != o1.CreateAt { @@ -371,7 +371,7 @@ func testPostStoreDelete(t *testing.T, ss store.Store) { t.Errorf("Expected (*Post).Props[model.POST_PROPS_DELETE_BY] to be %v but got %v.", deleteByID, actual) } - if r3, err := ss.Post().Get(o1.Id); err == nil { + if r3, err := ss.Post().Get(o1.Id, false); err == nil { t.Log(r3) t.Fatal("Missing id should have failed") } @@ -403,11 +403,11 @@ func testPostStoreDelete1Level(t *testing.T, ss store.Store) { t.Fatal(err) } - if _, err := ss.Post().Get(o1.Id); err == nil { + if _, err := ss.Post().Get(o1.Id, false); err == nil { t.Fatal("Deleted id should have failed") } - if _, err := ss.Post().Get(o2.Id); err == nil { + if _, err := ss.Post().Get(o2.Id, false); err == nil { t.Fatal("Deleted id should have failed") } } @@ -449,19 +449,19 @@ func testPostStoreDelete2Level(t *testing.T, ss store.Store) { t.Fatal(err) } - if _, err := ss.Post().Get(o1.Id); err == nil { + if _, err := ss.Post().Get(o1.Id, false); err == nil { t.Fatal("Deleted id should have failed") } - if _, err := ss.Post().Get(o2.Id); err == nil { + if _, err := ss.Post().Get(o2.Id, false); err == nil { t.Fatal("Deleted id should have failed") } - if _, err := ss.Post().Get(o3.Id); err == nil { + if _, err := ss.Post().Get(o3.Id, false); err == nil { t.Fatal("Deleted id should have failed") } - if _, err := ss.Post().Get(o4.Id); err != nil { + if 
_, err := ss.Post().Get(o4.Id, false); err != nil { t.Fatal(err) } } @@ -494,11 +494,11 @@ func testPostStorePermDelete1Level(t *testing.T, ss store.Store) { t.Fatal(err2) } - if _, err := ss.Post().Get(o1.Id); err != nil { + if _, err := ss.Post().Get(o1.Id, false); err != nil { t.Fatal("Deleted id shouldn't have failed") } - if _, err := ss.Post().Get(o2.Id); err == nil { + if _, err := ss.Post().Get(o2.Id, false); err == nil { t.Fatal("Deleted id should have failed") } @@ -506,7 +506,7 @@ func testPostStorePermDelete1Level(t *testing.T, ss store.Store) { t.Fatal(err) } - if _, err := ss.Post().Get(o3.Id); err == nil { + if _, err := ss.Post().Get(o3.Id, false); err == nil { t.Fatal("Deleted id should have failed") } } @@ -539,15 +539,15 @@ func testPostStorePermDelete1Level2(t *testing.T, ss store.Store) { t.Fatal(err2) } - if _, err := ss.Post().Get(o1.Id); err == nil { + if _, err := ss.Post().Get(o1.Id, false); err == nil { t.Fatal("Deleted id should have failed") } - if _, err := ss.Post().Get(o2.Id); err == nil { + if _, err := ss.Post().Get(o2.Id, false); err == nil { t.Fatal("Deleted id should have failed") } - if _, err := ss.Post().Get(o3.Id); err != nil { + if _, err := ss.Post().Get(o3.Id, false); err != nil { t.Fatal("Deleted id shouldn't have failed") } } @@ -578,7 +578,7 @@ func testPostStoreGetWithChildren(t *testing.T, ss store.Store) { o3, err = ss.Post().Save(o3) require.Nil(t, err) - pl, err := ss.Post().Get(o1.Id) + pl, err := ss.Post().Get(o1.Id, false) if err != nil { t.Fatal(err) } @@ -591,7 +591,7 @@ func testPostStoreGetWithChildren(t *testing.T, ss store.Store) { t.Fatal(dErr) } - pl, err = ss.Post().Get(o1.Id) + pl, err = ss.Post().Get(o1.Id, false) if err != nil { t.Fatal(err) } @@ -604,7 +604,7 @@ func testPostStoreGetWithChildren(t *testing.T, ss store.Store) { t.Fatal(dErr) } - pl, err = ss.Post().Get(o1.Id) + pl, err = ss.Post().Get(o1.Id, false) if err != nil { t.Fatal(err) } @@ -670,7 +670,7 @@ func 
testPostStoreGetPostsWithDetails(t *testing.T, ss store.Store) { o5, err = ss.Post().Save(o5) require.Nil(t, err) - r1, err := ss.Post().GetPosts(o1.ChannelId, 0, 4, false) + r1, err := ss.Post().GetPosts(model.GetPostsOptions{ChannelId: o1.ChannelId, Page: 0, PerPage: 4}, false) require.Nil(t, err) if r1.Order[0] != o5.Id { @@ -697,7 +697,7 @@ func testPostStoreGetPostsWithDetails(t *testing.T, ss store.Store) { t.Fatal("Missing parent") } - r2, err := ss.Post().GetPosts(o1.ChannelId, 0, 4, true) + r2, err := ss.Post().GetPosts(model.GetPostsOptions{ChannelId: o1.ChannelId, Page: 0, PerPage: 4}, true) require.Nil(t, err) if r2.Order[0] != o5.Id { @@ -725,7 +725,7 @@ func testPostStoreGetPostsWithDetails(t *testing.T, ss store.Store) { } // Run once to fill cache - _, err = ss.Post().GetPosts(o1.ChannelId, 0, 30, true) + _, err = ss.Post().GetPosts(model.GetPostsOptions{ChannelId: o1.ChannelId, Page: 0, PerPage: 30}, true) require.Nil(t, err) o6 := &model.Post{} @@ -736,14 +736,14 @@ func testPostStoreGetPostsWithDetails(t *testing.T, ss store.Store) { require.Nil(t, err) // Should only be 6 since we hit the cache - r3, err := ss.Post().GetPosts(o1.ChannelId, 0, 30, true) + r3, err := ss.Post().GetPosts(model.GetPostsOptions{ChannelId: o1.ChannelId, Page: 0, PerPage: 30}, true) require.Nil(t, err) assert.Equal(t, 6, len(r3.Order)) ss.Post().InvalidateLastPostTimeCache(o1.ChannelId) // Cache was invalidated, we should get all the posts - r4, err := ss.Post().GetPosts(o1.ChannelId, 0, 30, true) + r4, err := ss.Post().GetPosts(model.GetPostsOptions{ChannelId: o1.ChannelId, Page: 0, PerPage: 30}, true) require.Nil(t, err) assert.Equal(t, 7, len(r4.Order)) } @@ -768,7 +768,7 @@ func testPostStoreGetPostsBeforeAfter(t *testing.T, ss store.Store) { } t.Run("should not return anything before the first post", func(t *testing.T) { - postList, err := ss.Post().GetPostsBefore(channelId, posts[0].Id, 10, 0) + postList, err := 
ss.Post().GetPostsBefore(model.GetPostsOptions{ChannelId: channelId, PostId: posts[0].Id, Page: 0, PerPage: 10}) assert.Nil(t, err) assert.Equal(t, []string{}, postList.Order) @@ -776,7 +776,7 @@ func testPostStoreGetPostsBeforeAfter(t *testing.T, ss store.Store) { }) t.Run("should return posts before a post", func(t *testing.T) { - postList, err := ss.Post().GetPostsBefore(channelId, posts[5].Id, 10, 0) + postList, err := ss.Post().GetPostsBefore(model.GetPostsOptions{ChannelId: channelId, PostId: posts[5].Id, Page: 0, PerPage: 10}) assert.Nil(t, err) assert.Equal(t, []string{posts[4].Id, posts[3].Id, posts[2].Id, posts[1].Id, posts[0].Id}, postList.Order) @@ -790,7 +790,7 @@ func testPostStoreGetPostsBeforeAfter(t *testing.T, ss store.Store) { }) t.Run("should limit posts before", func(t *testing.T) { - postList, err := ss.Post().GetPostsBefore(channelId, posts[5].Id, 2, 0) + postList, err := ss.Post().GetPostsBefore(model.GetPostsOptions{ChannelId: channelId, PostId: posts[5].Id, PerPage: 2}) assert.Nil(t, err) assert.Equal(t, []string{posts[4].Id, posts[3].Id}, postList.Order) @@ -801,7 +801,7 @@ func testPostStoreGetPostsBeforeAfter(t *testing.T, ss store.Store) { }) t.Run("should not return anything after the last post", func(t *testing.T) { - postList, err := ss.Post().GetPostsAfter(channelId, posts[len(posts)-1].Id, 10, 0) + postList, err := ss.Post().GetPostsAfter(model.GetPostsOptions{ChannelId: channelId, PostId: posts[len(posts)-1].Id, PerPage: 10}) assert.Nil(t, err) assert.Equal(t, []string{}, postList.Order) @@ -809,7 +809,7 @@ func testPostStoreGetPostsBeforeAfter(t *testing.T, ss store.Store) { }) t.Run("should return posts after a post", func(t *testing.T) { - postList, err := ss.Post().GetPostsAfter(channelId, posts[5].Id, 10, 0) + postList, err := ss.Post().GetPostsAfter(model.GetPostsOptions{ChannelId: channelId, PostId: posts[5].Id, PerPage: 10}) assert.Nil(t, err) assert.Equal(t, []string{posts[9].Id, posts[8].Id, posts[7].Id, posts[6].Id}, 
postList.Order) @@ -822,7 +822,7 @@ func testPostStoreGetPostsBeforeAfter(t *testing.T, ss store.Store) { }) t.Run("should limit posts after", func(t *testing.T) { - postList, err := ss.Post().GetPostsAfter(channelId, posts[5].Id, 2, 0) + postList, err := ss.Post().GetPostsAfter(model.GetPostsOptions{ChannelId: channelId, PostId: posts[5].Id, PerPage: 2}) assert.Nil(t, err) assert.Equal(t, []string{posts[7].Id, posts[6].Id}, postList.Order) @@ -832,7 +832,6 @@ func testPostStoreGetPostsBeforeAfter(t *testing.T, ss store.Store) { }, postList.Posts) }) }) - t.Run("with threads", func(t *testing.T) { channelId := model.NewId() userId := model.NewId() @@ -903,7 +902,7 @@ func testPostStoreGetPostsBeforeAfter(t *testing.T, ss store.Store) { post2.UpdateAt = post6.UpdateAt t.Run("should return each post and thread before a post", func(t *testing.T) { - postList, err := ss.Post().GetPostsBefore(channelId, post4.Id, 2, 0) + postList, err := ss.Post().GetPostsBefore(model.GetPostsOptions{ChannelId: channelId, PostId: post4.Id, PerPage: 2}) assert.Nil(t, err) assert.Equal(t, []string{post3.Id, post2.Id}, postList.Order) @@ -917,7 +916,116 @@ func testPostStoreGetPostsBeforeAfter(t *testing.T, ss store.Store) { }) t.Run("should return each post and the root of each thread after a post", func(t *testing.T) { - postList, err := ss.Post().GetPostsAfter(channelId, post4.Id, 2, 0) + postList, err := ss.Post().GetPostsAfter(model.GetPostsOptions{ChannelId: channelId, PostId: post4.Id, PerPage: 2}) + assert.Nil(t, err) + + assert.Equal(t, []string{post6.Id, post5.Id}, postList.Order) + assert.Equal(t, map[string]*model.Post{ + post2.Id: post2, + post4.Id: post4, + post5.Id: post5, + post6.Id: post6, + }, postList.Posts) + }) + }) + t.Run("with threads (skipFetchThreads)", func(t *testing.T) { + channelId := model.NewId() + userId := model.NewId() + + // This creates a series of posts that looks like: + // post1 + // post2 + // post3 (in response to post1) + // post4 (in response to 
post2) + // post5 + // post6 (in response to post2) + + post1, err := ss.Post().Save(&model.Post{ + ChannelId: channelId, + UserId: userId, + Message: "post1", + }) + require.Nil(t, err) + post1.ReplyCount = 1 + time.Sleep(time.Millisecond) + + post2, err := ss.Post().Save(&model.Post{ + ChannelId: channelId, + UserId: userId, + Message: "post2", + }) + require.Nil(t, err) + post2.ReplyCount = 2 + time.Sleep(time.Millisecond) + + post3, err := ss.Post().Save(&model.Post{ + ChannelId: channelId, + UserId: userId, + ParentId: post1.Id, + RootId: post1.Id, + Message: "post3", + }) + require.Nil(t, err) + time.Sleep(time.Millisecond) + + post4, err := ss.Post().Save(&model.Post{ + ChannelId: channelId, + UserId: userId, + RootId: post2.Id, + ParentId: post2.Id, + Message: "post4", + }) + require.Nil(t, err) + time.Sleep(time.Millisecond) + + post5, err := ss.Post().Save(&model.Post{ + ChannelId: channelId, + UserId: userId, + Message: "post5", + }) + require.Nil(t, err) + time.Sleep(time.Millisecond) + + post6, err := ss.Post().Save(&model.Post{ + ChannelId: channelId, + UserId: userId, + ParentId: post2.Id, + RootId: post2.Id, + Message: "post6", + }) + require.Nil(t, err) + + // Adding a post to a thread changes the UpdateAt timestamp of the parent post + post1.UpdateAt = post3.UpdateAt + post2.UpdateAt = post6.UpdateAt + + t.Run("should return each post and thread before a post", func(t *testing.T) { + postList, err := ss.Post().GetPostsBefore(model.GetPostsOptions{ChannelId: channelId, PostId: post4.Id, PerPage: 2, SkipFetchThreads: true}) + assert.Nil(t, err) + + assert.Equal(t, []string{post3.Id, post2.Id}, postList.Order) + assert.Equal(t, map[string]*model.Post{ + post1.Id: post1, + post2.Id: post2, + post3.Id: post3, + post4.Id: post4, + post6.Id: post6, + }, postList.Posts) + }) + + t.Run("should return each post and thread before a post with limit", func(t *testing.T) { + postList, err := ss.Post().GetPostsBefore(model.GetPostsOptions{ChannelId: channelId, 
PostId: post4.Id, PerPage: 1, SkipFetchThreads: true}) + assert.Nil(t, err) + + assert.Equal(t, []string{post3.Id}, postList.Order) + assert.Equal(t, map[string]*model.Post{ + post1.Id: post1, + post3.Id: post3, + }, postList.Posts) + }) + + t.Run("should return each post and the root of each thread after a post", func(t *testing.T) { + postList, err := ss.Post().GetPostsAfter(model.GetPostsOptions{ChannelId: channelId, PostId: post4.Id, PerPage: 2, SkipFetchThreads: true}) assert.Nil(t, err) assert.Equal(t, []string{post6.Id, post5.Id}, postList.Order) @@ -986,7 +1094,7 @@ func testPostStoreGetPostsSince(t *testing.T, ss store.Store) { require.Nil(t, err) time.Sleep(time.Millisecond) - postList, err := ss.Post().GetPostsSince(channelId, post3.CreateAt, false) + postList, err := ss.Post().GetPostsSince(model.GetPostsSinceOptions{ChannelId: channelId, Time: post3.CreateAt}, false) assert.Nil(t, err) assert.Equal(t, []string{ @@ -1017,7 +1125,7 @@ func testPostStoreGetPostsSince(t *testing.T, ss store.Store) { require.Nil(t, err) time.Sleep(time.Millisecond) - postList, err := ss.Post().GetPostsSince(channelId, post1.CreateAt, false) + postList, err := ss.Post().GetPostsSince(model.GetPostsSinceOptions{ChannelId: channelId, Time: post1.CreateAt}, false) assert.Nil(t, err) assert.Equal(t, []string{}, postList.Order) @@ -1039,12 +1147,12 @@ func testPostStoreGetPostsSince(t *testing.T, ss store.Store) { time.Sleep(time.Millisecond) // Make a request that returns no results - postList, err := ss.Post().GetPostsSince(channelId, post1.CreateAt, true) + postList, err := ss.Post().GetPostsSince(model.GetPostsSinceOptions{ChannelId: channelId, Time: post1.CreateAt}, true) require.Nil(t, err) require.Equal(t, model.NewPostList(), postList) // And then ensure that it doesn't cause future requests to also return no results - postList, err = ss.Post().GetPostsSince(channelId, post1.CreateAt-1, true) + postList, err = ss.Post().GetPostsSince(model.GetPostsSinceOptions{ChannelId: 
channelId, Time: post1.CreateAt - 1}, true) assert.Nil(t, err) assert.Equal(t, []string{post1.Id}, postList.Order) @@ -2173,17 +2281,17 @@ func testPostStoreOverwrite(t *testing.T, ss store.Store) { o3.Message = "zz" + model.NewId() + "QQQQQQQQQQ" o3, err = ss.Post().Save(o3) - r1, err := ss.Post().Get(o1.Id) + r1, err := ss.Post().Get(o1.Id, false) if err != nil { t.Fatal(err) } ro1 := r1.Posts[o1.Id] - r2, err := ss.Post().Get(o1.Id) + r2, err := ss.Post().Get(o1.Id, false) if err != nil { t.Fatal(err) } ro2 := r2.Posts[o2.Id] - r3, err := ss.Post().Get(o3.Id) + r3, err := ss.Post().Get(o3.Id, false) if err != nil { t.Fatal(err) } @@ -2201,7 +2309,7 @@ func testPostStoreOverwrite(t *testing.T, ss store.Store) { t.Fatal(err) } - r1, err = ss.Post().Get(o1.Id) + r1, err = ss.Post().Get(o1.Id, false) if err != nil { t.Fatal(err) } @@ -2219,7 +2327,7 @@ func testPostStoreOverwrite(t *testing.T, ss store.Store) { t.Fatal(err) } - r2, err = ss.Post().Get(o1.Id) + r2, err = ss.Post().Get(o1.Id, false) if err != nil { t.Fatal(err) } @@ -2237,7 +2345,7 @@ func testPostStoreOverwrite(t *testing.T, ss store.Store) { t.Fatal(err) } - r3, err = ss.Post().Get(o3.Id) + r3, err = ss.Post().Get(o3.Id, false) if err != nil { t.Fatal(err) } @@ -2255,7 +2363,7 @@ func testPostStoreOverwrite(t *testing.T, ss store.Store) { }) require.Nil(t, err) - r4, err := ss.Post().Get(o4.Id) + r4, err := ss.Post().Get(o4.Id, false) if err != nil { t.Fatal(err) } @@ -2270,7 +2378,7 @@ func testPostStoreOverwrite(t *testing.T, ss store.Store) { t.Fatal(err) } - r4, err = ss.Post().Get(o4.Id) + r4, err = ss.Post().Get(o4.Id, false) if err != nil { t.Fatal(err) } @@ -2306,17 +2414,17 @@ func testPostStoreGetPostsByIds(t *testing.T, ss store.Store) { o3, err = ss.Post().Save(o3) require.Nil(t, err) - r1, err := ss.Post().Get(o1.Id) + r1, err := ss.Post().Get(o1.Id, false) if err != nil { t.Fatal(err) } ro1 := r1.Posts[o1.Id] - r2, err := ss.Post().Get(o2.Id) + r2, err := ss.Post().Get(o2.Id, false) if 
err != nil { t.Fatal(err) } ro2 := r2.Posts[o2.Id] - r3, err := ss.Post().Get(o3.Id) + r3, err := ss.Post().Get(o3.Id, false) if err != nil { t.Fatal(err) } @@ -2444,15 +2552,15 @@ func testPostStorePermanentDeleteBatch(t *testing.T, ss store.Store) { _, err = ss.Post().PermanentDeleteBatch(2000, 1000) require.Nil(t, err) - if _, err := ss.Post().Get(o1.Id); err == nil { + if _, err := ss.Post().Get(o1.Id, false); err == nil { t.Fatalf("Should have not found post 1 after purge") } - if _, err := ss.Post().Get(o2.Id); err == nil { + if _, err := ss.Post().Get(o2.Id, false); err == nil { t.Fatalf("Should have not found post 2 after purge") } - if _, err := ss.Post().Get(o3.Id); err != nil { + if _, err := ss.Post().Get(o3.Id, false); err != nil { t.Fatalf("Should have not found post 3 after purge") } } diff --git a/store/storetest/reaction_store.go b/store/storetest/reaction_store.go index 83860c583c..06cf857d05 100644 --- a/store/storetest/reaction_store.go +++ b/store/storetest/reaction_store.go @@ -42,7 +42,7 @@ func testReactionSave(t *testing.T, ss store.Store) { } var secondUpdateAt int64 - postList, err := ss.Post().Get(reaction1.PostId) + postList, err := ss.Post().Get(reaction1.PostId, false) if err != nil { t.Fatal(err) } @@ -69,7 +69,7 @@ func testReactionSave(t *testing.T, ss store.Store) { t.Fatal(err) } - postList, err = ss.Post().Get(reaction2.PostId) + postList, err = ss.Post().Get(reaction2.PostId, false) if err != nil { t.Fatal(err) } @@ -123,7 +123,7 @@ func testReactionDelete(t *testing.T, ss store.Store) { _, err = ss.Reaction().Save(reaction) require.Nil(t, err) - result, err := ss.Post().Get(reaction.PostId) + result, err := ss.Post().Get(reaction.PostId, false) if err != nil { t.Fatal(err) } @@ -138,7 +138,7 @@ func testReactionDelete(t *testing.T, ss store.Store) { } else if len(reactions) != 0 { t.Fatal("should've deleted reaction") } - postList, err := ss.Post().Get(post.Id) + postList, err := ss.Post().Get(post.Id, false) if err != nil { 
t.Fatal(err) } @@ -316,7 +316,7 @@ func testReactionDeleteAllWithEmojiName(t *testing.T, ss store.Store) { } // check that the posts are updated - postList, err := ss.Post().Get(post.Id) + postList, err := ss.Post().Get(post.Id, false) if err != nil { t.Fatal(err) } @@ -324,7 +324,7 @@ func testReactionDeleteAllWithEmojiName(t *testing.T, ss store.Store) { t.Fatal("post should still have reactions") } - postList, err = ss.Post().Get(post2.Id) + postList, err = ss.Post().Get(post2.Id, false) if err != nil { t.Fatal(err) } @@ -332,7 +332,7 @@ func testReactionDeleteAllWithEmojiName(t *testing.T, ss store.Store) { t.Fatal("post should still have reactions") } - postList, err = ss.Post().Get(post3.Id) + postList, err = ss.Post().Get(post3.Id, false) if err != nil { t.Fatal(err) } diff --git a/store/timer_layer.go b/store/timer_layer.go index da3bcb5a8c..577852408e 100644 --- a/store/timer_layer.go +++ b/store/timer_layer.go @@ -3728,6 +3728,23 @@ func (s *TimerLayerOAuthStore) UpdateApp(app *model.OAuthApp) (*model.OAuthApp, return resultVar0, resultVar1 } +func (s *TimerLayerPluginStore) CompareAndDelete(keyVal *model.PluginKeyValue, oldValue []byte) (bool, *model.AppError) { + start := timemodule.Now() + + resultVar0, resultVar1 := s.PluginStore.CompareAndDelete(keyVal, oldValue) + + t := timemodule.Now() + elapsed := t.Sub(start) + if s.Root.Metrics != nil { + success := "false" + if resultVar1 == nil { + success = "true" + } + s.Root.Metrics.ObserveStoreMethodDuration("PluginStore.CompareAndDelete", success, float64(elapsed)) + } + return resultVar0, resultVar1 +} + func (s *TimerLayerPluginStore) CompareAndSet(keyVal *model.PluginKeyValue, oldValue []byte) (bool, *model.AppError) { start := timemodule.Now() @@ -3932,10 +3949,10 @@ func (s *TimerLayerPostStore) Delete(postId string, time int64, deleteByID strin return resultVar0 } -func (s *TimerLayerPostStore) Get(id string) (*model.PostList, *model.AppError) { +func (s *TimerLayerPostStore) Get(id string, 
skipFetchThreads bool) (*model.PostList, *model.AppError) { start := timemodule.Now() - resultVar0, resultVar1 := s.PostStore.Get(id) + resultVar0, resultVar1 := s.PostStore.Get(id, skipFetchThreads) t := timemodule.Now() elapsed := t.Sub(start) @@ -4136,10 +4153,10 @@ func (s *TimerLayerPostStore) GetPostIdBeforeTime(channelId string, time int64) return resultVar0, resultVar1 } -func (s *TimerLayerPostStore) GetPosts(channelId string, offset int, limit int, allowFromCache bool) (*model.PostList, *model.AppError) { +func (s *TimerLayerPostStore) GetPosts(options model.GetPostsOptions, allowFromCache bool) (*model.PostList, *model.AppError) { start := timemodule.Now() - resultVar0, resultVar1 := s.PostStore.GetPosts(channelId, offset, limit, allowFromCache) + resultVar0, resultVar1 := s.PostStore.GetPosts(options, allowFromCache) t := timemodule.Now() elapsed := t.Sub(start) @@ -4153,10 +4170,10 @@ func (s *TimerLayerPostStore) GetPosts(channelId string, offset int, limit int, return resultVar0, resultVar1 } -func (s *TimerLayerPostStore) GetPostsAfter(channelId string, postId string, numPosts int, offset int) (*model.PostList, *model.AppError) { +func (s *TimerLayerPostStore) GetPostsAfter(options model.GetPostsOptions) (*model.PostList, *model.AppError) { start := timemodule.Now() - resultVar0, resultVar1 := s.PostStore.GetPostsAfter(channelId, postId, numPosts, offset) + resultVar0, resultVar1 := s.PostStore.GetPostsAfter(options) t := timemodule.Now() elapsed := t.Sub(start) @@ -4187,10 +4204,10 @@ func (s *TimerLayerPostStore) GetPostsBatchForIndexing(startTime int64, endTime return resultVar0, resultVar1 } -func (s *TimerLayerPostStore) GetPostsBefore(channelId string, postId string, numPosts int, offset int) (*model.PostList, *model.AppError) { +func (s *TimerLayerPostStore) GetPostsBefore(options model.GetPostsOptions) (*model.PostList, *model.AppError) { start := timemodule.Now() - resultVar0, resultVar1 := s.PostStore.GetPostsBefore(channelId, postId, 
numPosts, offset) + resultVar0, resultVar1 := s.PostStore.GetPostsBefore(options) t := timemodule.Now() elapsed := t.Sub(start) @@ -4238,10 +4255,10 @@ func (s *TimerLayerPostStore) GetPostsCreatedAt(channelId string, time int64) ([ return resultVar0, resultVar1 } -func (s *TimerLayerPostStore) GetPostsSince(channelId string, time int64, allowFromCache bool) (*model.PostList, *model.AppError) { +func (s *TimerLayerPostStore) GetPostsSince(options model.GetPostsSinceOptions, allowFromCache bool) (*model.PostList, *model.AppError) { start := timemodule.Now() - resultVar0, resultVar1 := s.PostStore.GetPostsSince(channelId, time, allowFromCache) + resultVar0, resultVar1 := s.PostStore.GetPostsSince(options, allowFromCache) t := timemodule.Now() elapsed := t.Sub(start) From 46c624f4f8707d8d45344ad5fc9b9019dd992d2c Mon Sep 17 00:00:00 2001 From: Esdras Beleza Date: Tue, 17 Sep 2019 14:38:22 +0100 Subject: [PATCH 46/53] Migrate app/team_test.go to use testify (#12215) (#12226) * Migrate app/team_test.go to use testify (#12215) * Add missing message * Fix suggestions --- app/team_test.go | 384 +++++++++++++++++++---------------------------- 1 file changed, 154 insertions(+), 230 deletions(-) diff --git a/app/team_test.go b/app/team_test.go index 159c0fd61c..e9b7cd5462 100644 --- a/app/team_test.go +++ b/app/team_test.go @@ -26,14 +26,11 @@ func TestCreateTeam(t *testing.T) { Type: model.TEAM_OPEN, } - if _, err := th.App.CreateTeam(team); err != nil { - t.Log(err) - t.Fatal("Should create a new team") - } + _, err := th.App.CreateTeam(team) + require.Nil(t, err, "Should create a new team") - if _, err := th.App.CreateTeam(th.BasicTeam); err == nil { - t.Fatal("Should not create a new team - team already exist") - } + _, err = th.App.CreateTeam(th.BasicTeam) + require.NotNil(t, err, "Should not create a new team - team already exist") } func TestCreateTeamWithUser(t *testing.T) { @@ -48,13 +45,11 @@ func TestCreateTeamWithUser(t *testing.T) { Type: model.TEAM_OPEN, } - if 
_, err := th.App.CreateTeamWithUser(team, th.BasicUser.Id); err != nil { - t.Fatal("Should create a new team with existing user", err) - } + _, err := th.App.CreateTeamWithUser(team, th.BasicUser.Id) + require.Nil(t, err, "Should create a new team with existing user") - if _, err := th.App.CreateTeamWithUser(team, model.NewId()); err == nil { - t.Fatal("Should not create a new team - user does not exist") - } + _, err = th.App.CreateTeamWithUser(team, model.NewId()) + require.NotNil(t, err, "Should not create a new team - user does not exist") } func TestUpdateTeam(t *testing.T) { @@ -63,14 +58,9 @@ func TestUpdateTeam(t *testing.T) { th.BasicTeam.DisplayName = "Testing 123" - if updatedTeam, err := th.App.UpdateTeam(th.BasicTeam); err != nil { - t.Log(err) - t.Fatal("Should update the team") - } else { - if updatedTeam.DisplayName != "Testing 123" { - t.Fatal("Wrong Team DisplayName") - } - } + updatedTeam, err := th.App.UpdateTeam(th.BasicTeam) + require.Nil(t, err, "Should update the team") + require.Equal(t, "Testing 123", updatedTeam.DisplayName, "Wrong Team DisplayName") } func TestAddUserToTeam(t *testing.T) { @@ -82,59 +72,45 @@ func TestAddUserToTeam(t *testing.T) { ruser, _ := th.App.CreateUser(&user) defer th.App.PermanentDeleteUser(&user) - if _, err := th.App.AddUserToTeam(th.BasicTeam.Id, ruser.Id, ""); err != nil { - t.Log(err) - t.Fatal("Should add user to the team") - } + _, err := th.App.AddUserToTeam(th.BasicTeam.Id, ruser.Id, "") + require.Nil(t, err, "Should add user to the team") }) t.Run("allow user by domain", func(t *testing.T) { th.BasicTeam.AllowedDomains = "example.com" - if _, err := th.App.UpdateTeam(th.BasicTeam); err != nil { - t.Log(err) - t.Fatal("Should update the team") - } + _, err := th.App.UpdateTeam(th.BasicTeam) + require.Nil(t, err, "Should update the team") user := model.User{Email: strings.ToLower(model.NewId()) + "success+test@example.com", Nickname: "Darth Vader", Username: "vader" + model.NewId(), Password: "passwd1", 
AuthService: ""} ruser, _ := th.App.CreateUser(&user) defer th.App.PermanentDeleteUser(&user) - if _, err := th.App.AddUserToTeam(th.BasicTeam.Id, ruser.Id, ""); err != nil { - t.Log(err) - t.Fatal("Should have allowed whitelisted user") - } + _, err = th.App.AddUserToTeam(th.BasicTeam.Id, ruser.Id, "") + require.Nil(t, err, "Should have allowed whitelisted user") }) t.Run("block user by domain but allow bot", func(t *testing.T) { th.BasicTeam.AllowedDomains = "example.com" - if _, err := th.App.UpdateTeam(th.BasicTeam); err != nil { - t.Log(err) - t.Fatal("Should update the team") - } + _, err := th.App.UpdateTeam(th.BasicTeam) + require.Nil(t, err, "Should update the team") user := model.User{Email: strings.ToLower(model.NewId()) + "test@invalid.com", Nickname: "Darth Vader", Username: "vader" + model.NewId(), Password: "passwd1", AuthService: ""} ruser, err := th.App.CreateUser(&user) - if err != nil { - t.Fatalf("Error creating user: %s", err) - } + require.Nil(t, err, "Error creating user: %s", err) defer th.App.PermanentDeleteUser(&user) - if _, err = th.App.AddUserToTeam(th.BasicTeam.Id, ruser.Id, ""); err == nil || err.Where != "JoinUserToTeam" { - t.Log(err) - t.Fatal("Should not add restricted user") - } + _, err = th.App.AddUserToTeam(th.BasicTeam.Id, ruser.Id, "") + require.NotNil(t, err, "Should not add restricted user") + require.Equal(t, "JoinUserToTeam", err.Where, "Error should be JoinUserToTeam") user = model.User{Email: strings.ToLower(model.NewId()) + "test@invalid.com", Nickname: "Darth Vader", Username: "vader" + model.NewId(), AuthService: "notnil", AuthData: model.NewString("notnil")} ruser, err = th.App.CreateUser(&user) - if err != nil { - t.Fatalf("Error creating authservice user: %s", err) - } + require.Nil(t, err, "Error creating authservice user: %s", err) defer th.App.PermanentDeleteUser(&user) - if _, err = th.App.AddUserToTeam(th.BasicTeam.Id, ruser.Id, ""); err == nil || err.Where != "JoinUserToTeam" { - t.Log(err) - 
t.Fatal("Should not add authservice user") - } + _, err = th.App.AddUserToTeam(th.BasicTeam.Id, ruser.Id, "") + require.NotNil(t, err, "Should not add authservice user") + require.Equal(t, "JoinUserToTeam", err.Where, "Error should be JoinUserToTeam") bot, err := th.App.CreateBot(&model.Bot{ Username: "somebot", @@ -149,50 +125,45 @@ func TestAddUserToTeam(t *testing.T) { t.Run("block user with subdomain", func(t *testing.T) { th.BasicTeam.AllowedDomains = "example.com" - if _, err := th.App.UpdateTeam(th.BasicTeam); err != nil { - t.Log(err) - t.Fatal("Should update the team") - } + _, err := th.App.UpdateTeam(th.BasicTeam) + require.Nil(t, err, "Should update the team") user := model.User{Email: strings.ToLower(model.NewId()) + "test@invalid.example.com", Nickname: "Darth Vader", Username: "vader" + model.NewId(), Password: "passwd1", AuthService: ""} ruser, _ := th.App.CreateUser(&user) defer th.App.PermanentDeleteUser(&user) - if _, err := th.App.AddUserToTeam(th.BasicTeam.Id, ruser.Id, ""); err == nil || err.Where != "JoinUserToTeam" { - t.Log(err) - t.Fatal("Should not add restricted user") - } + _, err = th.App.AddUserToTeam(th.BasicTeam.Id, ruser.Id, "") + require.NotNil(t, err, "Should not add restricted user") + require.Equal(t, "JoinUserToTeam", err.Where, "Error should be JoinUserToTeam") }) t.Run("allow users by multiple domains", func(t *testing.T) { th.BasicTeam.AllowedDomains = "foo.com, bar.com" - if _, err := th.App.UpdateTeam(th.BasicTeam); err != nil { - t.Log(err) - t.Fatal("Should update the team") - } + _, err := th.App.UpdateTeam(th.BasicTeam) + require.Nil(t, err, "Should update the team") user1 := model.User{Email: strings.ToLower(model.NewId()) + "success+test@foo.com", Nickname: "Darth Vader", Username: "vader" + model.NewId(), Password: "passwd1", AuthService: ""} ruser1, _ := th.App.CreateUser(&user1) + user2 := model.User{Email: strings.ToLower(model.NewId()) + "success+test@bar.com", Nickname: "Darth Vader", Username: "vader" + 
model.NewId(), Password: "passwd1", AuthService: ""} ruser2, _ := th.App.CreateUser(&user2) + user3 := model.User{Email: strings.ToLower(model.NewId()) + "success+test@invalid.com", Nickname: "Darth Vader", Username: "vader" + model.NewId(), Password: "passwd1", AuthService: ""} ruser3, _ := th.App.CreateUser(&user3) + defer th.App.PermanentDeleteUser(&user1) defer th.App.PermanentDeleteUser(&user2) defer th.App.PermanentDeleteUser(&user3) - if _, err := th.App.AddUserToTeam(th.BasicTeam.Id, ruser1.Id, ""); err != nil { - t.Log(err) - t.Fatal("Should have allowed whitelisted user1") - } - if _, err := th.App.AddUserToTeam(th.BasicTeam.Id, ruser2.Id, ""); err != nil { - t.Log(err) - t.Fatal("Should have allowed whitelisted user2") - } - if _, err := th.App.AddUserToTeam(th.BasicTeam.Id, ruser3.Id, ""); err == nil || err.Where != "JoinUserToTeam" { - t.Log(err) - t.Fatal("Should not have allowed restricted user3") - } + _, err = th.App.AddUserToTeam(th.BasicTeam.Id, ruser1.Id, "") + require.Nil(t, err, "Should have allowed whitelisted user1") + + _, err = th.App.AddUserToTeam(th.BasicTeam.Id, ruser2.Id, "") + require.Nil(t, err, "Should have allowed whitelisted user2") + + _, err = th.App.AddUserToTeam(th.BasicTeam.Id, ruser3.Id, "") + require.NotNil(t, err, "Should not have allowed restricted user3") + require.Equal(t, "JoinUserToTeam", err.Where, "Error should be JoinUserToTeam") }) } @@ -206,9 +177,8 @@ func TestAddUserToTeamByToken(t *testing.T) { rguest := th.CreateGuest() t.Run("invalid token", func(t *testing.T) { - if _, err := th.App.AddUserToTeamByToken(ruser.Id, "123"); err == nil { - t.Fatal("Should fail on unexisting token") - } + _, err := th.App.AddUserToTeamByToken(ruser.Id, "123") + require.NotNil(t, err, "Should fail on unexisting token") }) t.Run("invalid token type", func(t *testing.T) { @@ -216,11 +186,12 @@ func TestAddUserToTeamByToken(t *testing.T) { TOKEN_TYPE_VERIFY_EMAIL, model.MapToJson(map[string]string{"teamId": th.BasicTeam.Id}), ) + 
require.Nil(t, th.App.Srv.Store.Token().Save(token)) defer th.App.DeleteToken(token) - if _, err := th.App.AddUserToTeamByToken(ruser.Id, token.Token); err == nil { - t.Fatal("Should fail on bad token type") - } + + _, err := th.App.AddUserToTeamByToken(ruser.Id, token.Token) + require.NotNil(t, err, "Should fail on bad token type") }) t.Run("expired token", func(t *testing.T) { @@ -228,12 +199,13 @@ func TestAddUserToTeamByToken(t *testing.T) { TOKEN_TYPE_TEAM_INVITATION, model.MapToJson(map[string]string{"teamId": th.BasicTeam.Id}), ) + token.CreateAt = model.GetMillis() - INVITATION_EXPIRY_TIME - 1 require.Nil(t, th.App.Srv.Store.Token().Save(token)) defer th.App.DeleteToken(token) - if _, err := th.App.AddUserToTeamByToken(ruser.Id, token.Token); err == nil { - t.Fatal("Should fail on expired token") - } + + _, err := th.App.AddUserToTeamByToken(ruser.Id, token.Token) + require.NotNil(t, err, "Should fail on expired token") }) t.Run("invalid team id", func(t *testing.T) { @@ -243,9 +215,9 @@ func TestAddUserToTeamByToken(t *testing.T) { ) require.Nil(t, th.App.Srv.Store.Token().Save(token)) defer th.App.DeleteToken(token) - if _, err := th.App.AddUserToTeamByToken(ruser.Id, token.Token); err == nil { - t.Fatal("Should fail on bad team id") - } + + _, err := th.App.AddUserToTeamByToken(ruser.Id, token.Token) + require.NotNil(t, err, "Should fail on bad team id") }) t.Run("invalid user id", func(t *testing.T) { @@ -255,9 +227,9 @@ func TestAddUserToTeamByToken(t *testing.T) { ) require.Nil(t, th.App.Srv.Store.Token().Save(token)) defer th.App.DeleteToken(token) - if _, err := th.App.AddUserToTeamByToken(model.NewId(), token.Token); err == nil { - t.Fatal("Should fail on bad user id") - } + + _, err := th.App.AddUserToTeamByToken(model.NewId(), token.Token) + require.NotNil(t, err, "Should fail on bad user id") }) t.Run("valid request", func(t *testing.T) { @@ -266,11 +238,10 @@ func TestAddUserToTeamByToken(t *testing.T) { 
model.MapToJson(map[string]string{"teamId": th.BasicTeam.Id}), ) require.Nil(t, th.App.Srv.Store.Token().Save(token)) - if _, err := th.App.AddUserToTeamByToken(ruser.Id, token.Token); err != nil { - t.Log(err) - t.Fatal("Should add user to the team") - } - _, err := th.App.Srv.Store.Token().GetByToken(token.Token) + _, err := th.App.AddUserToTeamByToken(ruser.Id, token.Token) + require.Nil(t, err, "Should add user to the team") + + _, err = th.App.Srv.Store.Token().GetByToken(token.Token) require.NotNil(t, err, "The token must be deleted after be used") members, err := th.App.GetChannelMembersForUser(th.BasicTeam.Id, ruser.Id) @@ -304,11 +275,11 @@ func TestAddUserToTeamByToken(t *testing.T) { model.MapToJson(map[string]string{"teamId": th.BasicTeam.Id, "channels": th.BasicChannel.Id}), ) require.Nil(t, th.App.Srv.Store.Token().Save(token)) - if _, err := th.App.AddUserToTeamByToken(rguest.Id, token.Token); err != nil { - t.Log(err) - t.Fatal("Should add user to the team") - } - _, err := th.App.Srv.Store.Token().GetByToken(token.Token) + + _, err := th.App.AddUserToTeamByToken(rguest.Id, token.Token) + require.Nil(t, err, "Should add user to the team") + + _, err = th.App.Srv.Store.Token().GetByToken(token.Token) require.NotNil(t, err, "The token must be deleted after be used") members, err := th.App.GetChannelMembersForUser(th.BasicTeam.Id, rguest.Id) @@ -319,35 +290,28 @@ func TestAddUserToTeamByToken(t *testing.T) { t.Run("group-constrained team", func(t *testing.T) { th.BasicTeam.GroupConstrained = model.NewBool(true) - if _, err := th.App.UpdateTeam(th.BasicTeam); err != nil { - t.Log(err) - t.Fatal("Should update the team") - } + _, err := th.App.UpdateTeam(th.BasicTeam) + require.Nil(t, err, "Should update the team") token := model.NewToken( TOKEN_TYPE_TEAM_INVITATION, model.MapToJson(map[string]string{"teamId": th.BasicTeam.Id}), ) require.Nil(t, th.App.Srv.Store.Token().Save(token)) - if _, err := th.App.AddUserToTeamByToken(ruser.Id, token.Token); err 
== nil { - t.Fatal("Should return an error when trying to join a group-constrained team.") - } else { - require.Equal(t, "app.team.invite_token.group_constrained.error", err.Id) - } + + _, err = th.App.AddUserToTeamByToken(ruser.Id, token.Token) + require.NotNil(t, err, "Should return an error when trying to join a group-constrained team.") + require.Equal(t, "app.team.invite_token.group_constrained.error", err.Id) th.BasicTeam.GroupConstrained = model.NewBool(false) - if _, err := th.App.UpdateTeam(th.BasicTeam); err != nil { - t.Log(err) - t.Fatal("Should update the team") - } + _, err = th.App.UpdateTeam(th.BasicTeam) + require.Nil(t, err, "Should update the team") }) t.Run("block user", func(t *testing.T) { th.BasicTeam.AllowedDomains = "example.com" - if _, err := th.App.UpdateTeam(th.BasicTeam); err != nil { - t.Log(err) - t.Fatal("Should update the team") - } + _, err := th.App.UpdateTeam(th.BasicTeam) + require.Nil(t, err, "Should update the team") user := model.User{Email: strings.ToLower(model.NewId()) + "test@invalid.com", Nickname: "Darth Vader", Username: "vader" + model.NewId(), Password: "passwd1", AuthService: ""} ruser, _ := th.App.CreateUser(&user) @@ -359,10 +323,9 @@ func TestAddUserToTeamByToken(t *testing.T) { ) require.Nil(t, th.App.Srv.Store.Token().Save(token)) - if _, err := th.App.AddUserToTeamByToken(ruser.Id, token.Token); err == nil || err.Where != "JoinUserToTeam" { - t.Log(err) - t.Fatal("Should not add restricted user") - } + _, err = th.App.AddUserToTeamByToken(ruser.Id, token.Token) + require.NotNil(t, err, "Should not add restricted user") + require.Equal(t, "JoinUserToTeam", err.Where, "Error should be JoinUserToTeam") }) } @@ -374,27 +337,22 @@ func TestAddUserToTeamByTeamId(t *testing.T) { user := model.User{Email: strings.ToLower(model.NewId()) + "success+test@example.com", Nickname: "Darth Vader", Username: "vader" + model.NewId(), Password: "passwd1", AuthService: ""} ruser, _ := th.App.CreateUser(&user) - if err := 
th.App.AddUserToTeamByTeamId(th.BasicTeam.Id, ruser); err != nil { - t.Log(err) - t.Fatal("Should add user to the team") - } + err := th.App.AddUserToTeamByTeamId(th.BasicTeam.Id, ruser) + require.Nil(t, err, "Should add user to the team") }) t.Run("block user", func(t *testing.T) { th.BasicTeam.AllowedDomains = "example.com" - if _, err := th.App.UpdateTeam(th.BasicTeam); err != nil { - t.Log(err) - t.Fatal("Should update the team") - } + _, err := th.App.UpdateTeam(th.BasicTeam) + require.Nil(t, err, "Should update the team") user := model.User{Email: strings.ToLower(model.NewId()) + "test@invalid.com", Nickname: "Darth Vader", Username: "vader" + model.NewId(), Password: "passwd1", AuthService: ""} ruser, _ := th.App.CreateUser(&user) defer th.App.PermanentDeleteUser(&user) - if err := th.App.AddUserToTeamByTeamId(th.BasicTeam.Id, ruser); err == nil || err.Where != "JoinUserToTeam" { - t.Log(err) - t.Fatal("Should not add restricted user") - } + err = th.App.AddUserToTeamByTeamId(th.BasicTeam.Id, ruser) + require.NotNil(t, err, "Should not add restricted user") + require.Equal(t, "JoinUserToTeam", err.Where, "Error should be JoinUserToTeam") }) } @@ -409,9 +367,8 @@ func TestPermanentDeleteTeam(t *testing.T) { Email: "foo@foo.com", Type: model.TEAM_OPEN, }) - if err != nil { - t.Fatal(err.Error()) - } + require.Nil(t, err, "Should create a team") + defer func() { th.App.PermanentDeleteTeam(team) }() @@ -423,21 +380,19 @@ func TestPermanentDeleteTeam(t *testing.T) { URL: "http://foo", Method: model.COMMAND_METHOD_POST, }) - if err != nil { - t.Fatal(err.Error()) - } + require.Nil(t, err, "Should create a command") defer th.App.DeleteCommand(command.Id) - if command, err = th.App.GetCommand(command.Id); command == nil || err != nil { - t.Fatal("unable to get new command") - } + command, err = th.App.GetCommand(command.Id) + require.NotNil(t, command, "command should not be nil") + require.Nil(t, err, "unable to get new command") err = 
th.App.PermanentDeleteTeam(team) require.Nil(t, err) - if command, err = th.App.GetCommand(command.Id); command != nil || err == nil { - t.Fatal("command wasn't deleted") - } + command, err = th.App.GetCommand(command.Id) + require.Nil(t, command, "command wasn't deleted") + require.NotNil(t, err, "should not return an error") // Test deleting a team with no channels. team = th.CreateTeam() @@ -445,19 +400,16 @@ func TestPermanentDeleteTeam(t *testing.T) { th.App.PermanentDeleteTeam(team) }() - if channels, err := th.App.GetPublicChannelsForTeam(team.Id, 0, 1000); err != nil { - t.Fatal(err) - } else { - for _, channel := range *channels { - if err2 := th.App.PermanentDeleteChannel(channel); err2 != nil { - t.Fatal(err) - } - } + channels, err := th.App.GetPublicChannelsForTeam(team.Id, 0, 1000) + require.Nil(t, err) + + for _, channel := range *channels { + err2 := th.App.PermanentDeleteChannel(channel) + require.Nil(t, err2) } - if err := th.App.PermanentDeleteTeam(team); err != nil { - t.Fatal(err) - } + err = th.App.PermanentDeleteTeam(team) + require.Nil(t, err) } func TestSanitizeTeam(t *testing.T) { @@ -469,6 +421,7 @@ func TestSanitizeTeam(t *testing.T) { Email: th.MakeEmail(), AllowedDomains: "example.com", } + copyTeam := func() *model.Team { copy := &model.Team{} *copy = *team @@ -489,9 +442,7 @@ func TestSanitizeTeam(t *testing.T) { } sanitized := th.App.SanitizeTeam(session, copyTeam()) - if sanitized.Email != "" { - t.Fatal("should've sanitized team") - } + require.Empty(t, sanitized.Email, "should've sanitized team") }) t.Run("user of the team", func(t *testing.T) { @@ -508,9 +459,7 @@ func TestSanitizeTeam(t *testing.T) { } sanitized := th.App.SanitizeTeam(session, copyTeam()) - if sanitized.Email != "" { - t.Fatal("should've sanitized team") - } + require.Empty(t, sanitized.Email, "should've sanitized team") }) t.Run("team admin", func(t *testing.T) { @@ -527,9 +476,7 @@ func TestSanitizeTeam(t *testing.T) { } sanitized := 
th.App.SanitizeTeam(session, copyTeam()) - if sanitized.Email == "" { - t.Fatal("shouldn't have sanitized team") - } + require.NotEmpty(t, sanitized.Email, "shouldn't have sanitized team") }) t.Run("team admin of another team", func(t *testing.T) { @@ -546,9 +493,7 @@ func TestSanitizeTeam(t *testing.T) { } sanitized := th.App.SanitizeTeam(session, copyTeam()) - if sanitized.Email != "" { - t.Fatal("should've sanitized team") - } + require.Empty(t, sanitized.Email, "should've sanitized team") }) t.Run("system admin, not a user of team", func(t *testing.T) { @@ -565,9 +510,7 @@ func TestSanitizeTeam(t *testing.T) { } sanitized := th.App.SanitizeTeam(session, copyTeam()) - if sanitized.Email == "" { - t.Fatal("shouldn't have sanitized team") - } + require.NotEmpty(t, sanitized.Email, "shouldn't have sanitized team") }) t.Run("system admin, user of team", func(t *testing.T) { @@ -584,9 +527,7 @@ func TestSanitizeTeam(t *testing.T) { } sanitized := th.App.SanitizeTeam(session, copyTeam()) - if sanitized.Email == "" { - t.Fatal("shouldn't have sanitized team") - } + require.NotEmpty(t, sanitized.Email, "shouldn't have sanitized team") }) } @@ -627,13 +568,8 @@ func TestSanitizeTeams(t *testing.T) { sanitized := th.App.SanitizeTeams(session, teams) - if sanitized[0].Email != "" { - t.Fatal("should've sanitized first team") - } - - if sanitized[1].Email == "" { - t.Fatal("shouldn't have sanitized second team") - } + require.Empty(t, sanitized[0].Email, "should've sanitized first team") + require.NotEmpty(t, sanitized[1].Email, "shouldn't have sanitized second team") }) t.Run("system admin", func(t *testing.T) { @@ -663,14 +599,8 @@ func TestSanitizeTeams(t *testing.T) { } sanitized := th.App.SanitizeTeams(session, teams) - - if sanitized[0].Email == "" { - t.Fatal("shouldn't have sanitized first team") - } - - if sanitized[1].Email == "" { - t.Fatal("shouldn't have sanitized second team") - } + assert.NotEmpty(t, sanitized[0].Email, "shouldn't have sanitized first team") 
+ assert.NotEmpty(t, sanitized[1].Email, "shouldn't have sanitized second team") }) } @@ -686,10 +616,8 @@ func TestJoinUserToTeam(t *testing.T) { Type: model.TEAM_OPEN, } - if _, err := th.App.CreateTeam(team); err != nil { - t.Log(err) - t.Fatal("Should create a new team") - } + _, err := th.App.CreateTeam(team) + require.Nil(t, err, "Should create a new team") maxUsersPerTeam := th.App.Config().TeamSettings.MaxUsersPerTeam defer func() { @@ -704,9 +632,9 @@ func TestJoinUserToTeam(t *testing.T) { ruser, _ := th.App.CreateUser(&user) defer th.App.PermanentDeleteUser(&user) - if _, alreadyAdded, err := th.App.joinUserToTeam(team, ruser); alreadyAdded || err != nil { - t.Fatal("Should return already added equal to false and no error") - } + _, alreadyAdded, err := th.App.joinUserToTeam(team, ruser) + require.False(t, alreadyAdded, "Should return already added equal to false") + require.Nil(t, err, "Should return no error") }) t.Run("join when you are a member", func(t *testing.T) { @@ -715,9 +643,10 @@ func TestJoinUserToTeam(t *testing.T) { defer th.App.PermanentDeleteUser(&user) th.App.joinUserToTeam(team, ruser) - if _, alreadyAdded, err := th.App.joinUserToTeam(team, ruser); !alreadyAdded || err != nil { - t.Fatal("Should return already added and no error") - } + + _, alreadyAdded, err := th.App.joinUserToTeam(team, ruser) + require.True(t, alreadyAdded, "Should return already added") + require.Nil(t, err, "Should return no error") }) t.Run("re-join after leaving", func(t *testing.T) { @@ -727,9 +656,10 @@ func TestJoinUserToTeam(t *testing.T) { th.App.joinUserToTeam(team, ruser) th.App.LeaveTeam(team, ruser, ruser.Id) - if _, alreadyAdded, err := th.App.joinUserToTeam(team, ruser); alreadyAdded || err != nil { - t.Fatal("Should return already added equal to false and no error") - } + + _, alreadyAdded, err := th.App.joinUserToTeam(team, ruser) + require.False(t, alreadyAdded, "Should return already added equal to false") + require.Nil(t, err, "Should return no 
error") }) t.Run("new join with limit problem", func(t *testing.T) { @@ -737,28 +667,31 @@ func TestJoinUserToTeam(t *testing.T) { ruser1, _ := th.App.CreateUser(&user1) user2 := model.User{Email: strings.ToLower(model.NewId()) + "success+test@example.com", Nickname: "Darth Vader", Username: "vader" + model.NewId(), Password: "passwd1", AuthService: ""} ruser2, _ := th.App.CreateUser(&user2) + defer th.App.PermanentDeleteUser(&user1) defer th.App.PermanentDeleteUser(&user2) th.App.joinUserToTeam(team, ruser1) - if _, _, err := th.App.joinUserToTeam(team, ruser2); err == nil { - t.Fatal("Should fail") - } + + _, _, err := th.App.joinUserToTeam(team, ruser2) + require.NotNil(t, err, "Should fail") }) t.Run("re-join alfter leaving with limit problem", func(t *testing.T) { user1 := model.User{Email: strings.ToLower(model.NewId()) + "success+test@example.com", Nickname: "Darth Vader", Username: "vader" + model.NewId(), Password: "passwd1", AuthService: ""} ruser1, _ := th.App.CreateUser(&user1) + user2 := model.User{Email: strings.ToLower(model.NewId()) + "success+test@example.com", Nickname: "Darth Vader", Username: "vader" + model.NewId(), Password: "passwd1", AuthService: ""} ruser2, _ := th.App.CreateUser(&user2) + defer th.App.PermanentDeleteUser(&user1) defer th.App.PermanentDeleteUser(&user2) th.App.joinUserToTeam(team, ruser1) th.App.LeaveTeam(team, ruser1, ruser1.Id) th.App.joinUserToTeam(team, ruser2) - if _, _, err := th.App.joinUserToTeam(team, ruser1); err == nil { - t.Fatal("Should fail") - } + + _, _, err := th.App.joinUserToTeam(team, ruser1) + require.NotNil(t, err, "Should fail") }) } @@ -771,13 +704,8 @@ func TestAppUpdateTeamScheme(t *testing.T) { team.SchemeId = mockID updatedTeam, err := th.App.UpdateTeamScheme(th.BasicTeam) - if err != nil { - t.Fatal(err) - } - - if updatedTeam.SchemeId != mockID { - t.Fatal("Wrong Team SchemeId") - } + require.Nil(t, err) + require.Equal(t, mockID, updatedTeam.SchemeId, "Wrong Team SchemeId") } func 
TestGetTeamMembers(t *testing.T) { @@ -811,6 +739,7 @@ func TestGetTeamMembers(t *testing.T) { // Fetch team members multipile times members, err := th.App.GetTeamMembers(th.BasicTeam.Id, 0, 5, nil) require.Nil(t, err) + // This should return 5 members members2, err := th.App.GetTeamMembers(th.BasicTeam.Id, 5, 6, nil) require.Nil(t, err) @@ -897,9 +826,8 @@ func TestUpdateTeamMemberRolesChangingGuest(t *testing.T) { _, err := th.App.AddUserToTeam(th.BasicTeam.Id, ruser.Id, "") require.Nil(t, err) - if _, err := th.App.UpdateTeamMemberRoles(th.BasicTeam.Id, ruser.Id, "team_user"); err == nil { - t.Fatal("Should fail when try to modify the guest role") - } + _, err = th.App.UpdateTeamMemberRoles(th.BasicTeam.Id, ruser.Id, "team_user") + require.NotNil(t, err, "Should fail when try to modify the guest role") }) t.Run("from user to guest", func(t *testing.T) { @@ -909,9 +837,8 @@ func TestUpdateTeamMemberRolesChangingGuest(t *testing.T) { _, err := th.App.AddUserToTeam(th.BasicTeam.Id, ruser.Id, "") require.Nil(t, err) - if _, err := th.App.UpdateTeamMemberRoles(th.BasicTeam.Id, ruser.Id, "team_guest"); err == nil { - t.Fatal("Should fail when try to modify the guest role") - } + _, err = th.App.UpdateTeamMemberRoles(th.BasicTeam.Id, ruser.Id, "team_guest") + require.NotNil(t, err, "Should fail when try to modify the guest role") }) t.Run("from user to admin", func(t *testing.T) { @@ -921,9 +848,8 @@ func TestUpdateTeamMemberRolesChangingGuest(t *testing.T) { _, err := th.App.AddUserToTeam(th.BasicTeam.Id, ruser.Id, "") require.Nil(t, err) - if _, err := th.App.UpdateTeamMemberRoles(th.BasicTeam.Id, ruser.Id, "team_user team_admin"); err != nil { - t.Fatal("Should work when you not modify guest role") - } + _, err = th.App.UpdateTeamMemberRoles(th.BasicTeam.Id, ruser.Id, "team_user team_admin") + require.Nil(t, err, "Should work when you not modify guest role") }) t.Run("from guest to guest plus custom", func(t *testing.T) { @@ -936,9 +862,8 @@ func 
TestUpdateTeamMemberRolesChangingGuest(t *testing.T) { _, err = th.App.CreateRole(&model.Role{Name: "custom", DisplayName: "custom", Description: "custom"}) require.Nil(t, err) - if _, err := th.App.UpdateTeamMemberRoles(th.BasicTeam.Id, ruser.Id, "team_guest custom"); err != nil { - t.Fatal("Should work when you not modify guest role") - } + _, err = th.App.UpdateTeamMemberRoles(th.BasicTeam.Id, ruser.Id, "team_guest custom") + require.Nil(t, err, "Should work when you not modify guest role") }) t.Run("a guest cant have user role", func(t *testing.T) { @@ -948,9 +873,8 @@ func TestUpdateTeamMemberRolesChangingGuest(t *testing.T) { _, err := th.App.AddUserToTeam(th.BasicTeam.Id, ruser.Id, "") require.Nil(t, err) - if _, err := th.App.UpdateTeamMemberRoles(th.BasicTeam.Id, ruser.Id, "team_guest team_user"); err == nil { - t.Fatal("Should work when you not modify guest role") - } + _, err = th.App.UpdateTeamMemberRoles(th.BasicTeam.Id, ruser.Id, "team_guest team_user") + require.NotNil(t, err, "Should work when you not modify guest role") }) } From 86891091c020542ee9cb8cd53e2e783f1dcc3029 Mon Sep 17 00:00:00 2001 From: Nikhil Ranjan Date: Tue, 17 Sep 2019 19:48:22 +0200 Subject: [PATCH 47/53] Converting to structured logging the file app/file.go (#12124) --- app/file.go | 48 +++++++++++++++++++++++++++++++----------------- 1 file changed, 31 insertions(+), 17 deletions(-) diff --git a/app/file.go b/app/file.go index 2d9b71f5b2..5815e6451f 100644 --- a/app/file.go +++ b/app/file.go @@ -139,7 +139,11 @@ func (a *App) GetInfoForFilename(post *model.Post, teamId string, filename strin // Find the path from the Filename of the form /{channelId}/{userId}/{uid}/{nameWithExtension} split := strings.SplitN(filename, "/", 5) if len(split) < 5 { - mlog.Error("Unable to decipher filename when migrating post to use FileInfos", mlog.String("post_id", post.Id), mlog.String("filename", filename)) + mlog.Error( + "Unable to decipher filename when migrating post to use FileInfos", + 
mlog.String("post_id", post.Id), + mlog.String("filename", filename), + ) return nil } @@ -176,9 +180,10 @@ func (a *App) GetInfoForFilename(post *model.Post, teamId string, filename strin info, err := model.GetInfoForBytes(name, data) if err != nil { mlog.Warn( - fmt.Sprintf("Unable to fully decode file info when migrating post to use FileInfos, err=%v", err), + "Unable to fully decode file info when migrating post to use FileInfos", mlog.String("post_id", post.Id), mlog.String("filename", filename), + mlog.Err(err), ) } @@ -207,7 +212,7 @@ func (a *App) FindTeamIdForFilename(post *model.Post, filename string) string { // This post is in a direct channel so we need to figure out what team the files are stored under. teams, err := a.Srv.Store.Team().GetTeamsByUserId(post.UserId) if err != nil { - mlog.Error(fmt.Sprintf("Unable to get teams when migrating post to use FileInfo, err=%v", err), mlog.String("post_id", post.Id)) + mlog.Error("Unable to get teams when migrating post to use FileInfo", mlog.Err(err), mlog.String("post_id", post.Id)) return "" } @@ -241,9 +246,10 @@ func (a *App) MigrateFilenamesToFileInfos(post *model.Post) []*model.FileInfo { filenames := utils.RemoveDuplicatesFromStringArray(post.Filenames) if errCh != nil { mlog.Error( - fmt.Sprintf("Unable to get channel when migrating post to use FileInfos, err=%v", errCh), + "Unable to get channel when migrating post to use FileInfos", mlog.String("post_id", post.Id), mlog.String("channel_id", post.ChannelId), + mlog.Err(errCh), ) return []*model.FileInfo{} } @@ -261,7 +267,8 @@ func (a *App) MigrateFilenamesToFileInfos(post *model.Post) []*model.FileInfo { infos := make([]*model.FileInfo, 0, len(filenames)) if teamId == "" { mlog.Error( - fmt.Sprintf("Unable to find team id for files when migrating post to use FileInfos, filenames=%v", filenames), + "Unable to find team id for files when migrating post to use FileInfos", + mlog.String("filenames", strings.Join(filenames, ",")), mlog.String("post_id", 
post.Id), ) } else { @@ -281,7 +288,7 @@ func (a *App) MigrateFilenamesToFileInfos(post *model.Post) []*model.FileInfo { result, err := a.Srv.Store.Post().Get(post.Id, false) if err != nil { - mlog.Error(fmt.Sprintf("Unable to get post when migrating post to use FileInfos, err=%v", err), mlog.String("post_id", post.Id)) + mlog.Error("Unable to get post when migrating post to use FileInfos", mlog.Err(err), mlog.String("post_id", post.Id)) return []*model.FileInfo{} } @@ -290,7 +297,7 @@ func (a *App) MigrateFilenamesToFileInfos(post *model.Post) []*model.FileInfo { var fileInfos []*model.FileInfo fileInfos, err = a.Srv.Store.FileInfo().GetForPost(post.Id, true, false, false) if err != nil { - mlog.Error(fmt.Sprintf("Unable to get FileInfos for migrated post, err=%v", err), mlog.String("post_id", post.Id)) + mlog.Error("Unable to get FileInfos for migrated post", mlog.Err(err), mlog.String("post_id", post.Id)) return []*model.FileInfo{} } @@ -305,10 +312,11 @@ func (a *App) MigrateFilenamesToFileInfos(post *model.Post) []*model.FileInfo { for _, info := range infos { if _, err = a.Srv.Store.FileInfo().Save(info); err != nil { mlog.Error( - fmt.Sprintf("Unable to save file info when migrating post to use FileInfos, err=%v", err), + "Unable to save file info when migrating post to use FileInfos", mlog.String("post_id", post.Id), mlog.String("file_info_id", info.Id), mlog.String("file_info_path", info.Path), + mlog.Err(err), ) continue } @@ -326,7 +334,13 @@ func (a *App) MigrateFilenamesToFileInfos(post *model.Post) []*model.FileInfo { // Update Posts to clear Filenames and set FileIds if _, err = a.Srv.Store.Post().Update(newPost, post); err != nil { - mlog.Error(fmt.Sprintf("Unable to save migrated post when migrating to use FileInfos, new_file_ids=%v, old_filenames=%v, err=%v", newPost.FileIds, post.Filenames, err), mlog.String("post_id", post.Id)) + mlog.Error( + "Unable to save migrated post when migrating to use FileInfos", + mlog.String("new_file_ids", 
strings.Join(newPost.FileIds, ",")), + mlog.String("old_filenames", strings.Join(post.Filenames, ",")), + mlog.String("post_id", post.Id), + mlog.Err(err), + ) return []*model.FileInfo{} } return savedInfos @@ -755,7 +769,7 @@ func (t *uploadFileTask) postprocessImage() { var err error decoded, typ, err = image.Decode(t.newReader()) if err != nil { - mlog.Error(fmt.Sprintf("Unable to decode image err=%v", err)) + mlog.Error("Unable to decode image", mlog.Err(err)) return } } @@ -779,14 +793,14 @@ func (t *uploadFileTask) postprocessImage() { go func() { _, aerr := t.writeFile(r, path) if aerr != nil { - mlog.Error(fmt.Sprintf("Unable to upload path=%v err=%v", path, aerr)) + mlog.Error("Unable to upload", mlog.String("path", path), mlog.Err(aerr)) return } }() err := jpeg.Encode(w, img, &jpeg.Options{Quality: 90}) if err != nil { - mlog.Error(fmt.Sprintf("Unable to encode image as jpeg path=%v err=%v", path, err)) + mlog.Error("Unable to encode image as jpeg", mlog.String("path", path), mlog.Err(err)) w.CloseWithError(err) } else { w.Close() @@ -959,7 +973,7 @@ func prepareImage(fileData []byte) (image.Image, int, int) { // Decode image bytes into Image object img, imgType, err := image.Decode(bytes.NewReader(fileData)) if err != nil { - mlog.Error(fmt.Sprintf("Unable to decode image err=%v", err)) + mlog.Error("Unable to decode image", mlog.Err(err)) return nil, 0, 0 } @@ -1038,12 +1052,12 @@ func (a *App) generateThumbnailImage(img image.Image, thumbnailPath string, widt buf := new(bytes.Buffer) if err := jpeg.Encode(buf, thumbnail, &jpeg.Options{Quality: 90}); err != nil { - mlog.Error(fmt.Sprintf("Unable to encode image as jpeg path=%v err=%v", thumbnailPath, err)) + mlog.Error("Unable to encode image as jpeg", mlog.String("path", thumbnailPath), mlog.Err(err)) return } if _, err := a.WriteFile(buf, thumbnailPath); err != nil { - mlog.Error(fmt.Sprintf("Unable to upload thumbnail path=%v err=%v", thumbnailPath, err)) + mlog.Error("Unable to upload thumbnail", 
mlog.String("path", thumbnailPath), mlog.Err(err)) return } } @@ -1060,12 +1074,12 @@ func (a *App) generatePreviewImage(img image.Image, previewPath string, width in buf := new(bytes.Buffer) if err := jpeg.Encode(buf, preview, &jpeg.Options{Quality: 90}); err != nil { - mlog.Error(fmt.Sprintf("Unable to encode image as preview jpg err=%v", err), mlog.String("path", previewPath)) + mlog.Error("Unable to encode image as preview jpg", mlog.Err(err), mlog.String("path", previewPath)) return } if _, err := a.WriteFile(buf, previewPath); err != nil { - mlog.Error(fmt.Sprintf("Unable to upload preview err=%v", err), mlog.String("path", previewPath)) + mlog.Error("Unable to upload preview", mlog.Err(err), mlog.String("path", previewPath)) return } } From 4ce7b92283aaa3ca56470891e6728c98a2a63df0 Mon Sep 17 00:00:00 2001 From: Jesse Hallam Date: Tue, 17 Sep 2019 16:02:26 -0300 Subject: [PATCH 48/53] MM-17023: Plugin Marketplace (#12183) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * MM-17149 - Extend config.json for marketplace settings (#11933) * MM-17149 - Extend config.json for marketplace settings * Renamed MarketplaceUrl, tracking default marketplace url * Added EnableMarketplace to the client config * Revert "Added EnableMarketplace to the client config" This reverts commit 0f982c4c661c2cd9bb96264e9a01a2363c40d9c5. 
* MM-17149 - Added EnableMarketplace to the client config (#11958) * Added EnableMarketplace to the client config * Moved EnableMarketplace setting out of limited client configuration * MM-17150, MM-17545, MM-18100 - Implement GET /api/v4/plugins/m… (#11977) * MM-17150 - Implement GET /api/v4/plugins/marketplace proxying upstream MM-17545 - Merge locally installed plugins into GET /api/v4/plugins/marketplace * Replaced MarketplacePluginState with Installed * Setting InstalledVersion instead of Installed * marketplace client setting per_page if non zero * Creating insecure client for marketplace url * Fixed trailing slash for default marketplace url * Adding filtering * Fixed function names * Renamed Manifest() to GetManifest(), added godoc for BaseMarketplacePlugin * Handling plugin.ErrNotFound correctly * Checking err == nil instead when a plugin is installed * MM-18450 - Local-only plugin search (#12152) * MM-17846: plugin icons (#12157) * MM-17846: add support for plugin icons Extend the model definitions to support plugin icons from the marketplace. 
* s/IconURL/IconData * MM-18475 - Converge on snake_case responses from the marketplace (#12179) * MM-18520 - MM-Server should forward server version to marketplace server (#12181) * Renamed request to filter client4.GetMarketplacePlugins * Renamed request to filter * Guarding against bad marketplace server response --- api4/helpers.go | 25 +++ api4/plugin.go | 62 ++++++ api4/plugin_test.go | 332 ++++++++++++++++++++++++++++++++- app/diagnostics.go | 2 + app/plugin.go | 99 ++++++++++ config/client.go | 1 + i18n/en.json | 16 ++ model/client4.go | 25 +++ model/config.go | 28 +-- model/marketplace_plugin.go | 69 +++++++ plugin/environment.go | 19 ++ services/marketplace/client.go | 79 ++++++++ tests/testpluginv2.tar.gz | Bin 0 -> 71989 bytes 13 files changed, 736 insertions(+), 21 deletions(-) create mode 100644 api4/helpers.go create mode 100644 model/marketplace_plugin.go create mode 100644 services/marketplace/client.go create mode 100644 tests/testpluginv2.tar.gz diff --git a/api4/helpers.go b/api4/helpers.go new file mode 100644 index 0000000000..6b07a279ce --- /dev/null +++ b/api4/helpers.go @@ -0,0 +1,25 @@ +// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. 
+ +package api4 + +import ( + "net/url" + "strconv" + + "github.com/pkg/errors" +) + +func parseInt(u *url.URL, name string, defaultValue int) (int, error) { + valueStr := u.Query().Get(name) + if valueStr == "" { + return defaultValue, nil + } + + value, err := strconv.Atoi(valueStr) + if err != nil { + return 0, errors.Wrapf(err, "failed to parse %s as integer", name) + } + + return value, nil +} diff --git a/api4/plugin.go b/api4/plugin.go index 1f045afc71..18ce219975 100644 --- a/api4/plugin.go +++ b/api4/plugin.go @@ -7,6 +7,7 @@ package api4 import ( "bytes" + "encoding/json" "io/ioutil" "net/http" "net/url" @@ -36,6 +37,8 @@ func (api *API) InitPlugin() { api.BaseRoutes.Plugin.Handle("/disable", api.ApiSessionRequired(disablePlugin)).Methods("POST") api.BaseRoutes.Plugins.Handle("/webapp", api.ApiHandler(getWebappPlugins)).Methods("GET") + + api.BaseRoutes.Plugins.Handle("/marketplace", api.ApiSessionRequired(getMarketplacePlugins)).Methods("GET") } func uploadPlugin(c *Context, w http.ResponseWriter, r *http.Request) { @@ -239,6 +242,43 @@ func getWebappPlugins(c *Context, w http.ResponseWriter, r *http.Request) { w.Write([]byte(model.ManifestListToJson(clientManifests))) } +func getMarketplacePlugins(c *Context, w http.ResponseWriter, r *http.Request) { + if !*c.App.Config().PluginSettings.Enable { + c.Err = model.NewAppError("getMarketplacePlugins", "app.plugin.disabled.app_error", nil, "", http.StatusNotImplemented) + return + } + + if !*c.App.Config().PluginSettings.EnableMarketplace { + c.Err = model.NewAppError("getMarketplacePlugins", "app.plugin.marketplace_disabled.app_error", nil, "", http.StatusNotImplemented) + return + } + + if !c.App.SessionHasPermissionTo(c.App.Session, model.PERMISSION_MANAGE_SYSTEM) { + c.SetPermissionError(model.PERMISSION_MANAGE_SYSTEM) + return + } + + filter, err := parseMarketplacePluginFilter(r.URL) + if err != nil { + c.Err = model.NewAppError("getMarketplacePlugins", "app.plugin.marshal.app_error", nil, err.Error(), 
http.StatusInternalServerError) + return + } + + plugins, appErr := c.App.GetMarketplacePlugins(filter) + if appErr != nil { + c.Err = appErr + return + } + + json, err := json.Marshal(plugins) + if err != nil { + c.Err = model.NewAppError("getMarketplacePlugins", "app.plugin.marshal.app_error", nil, err.Error(), http.StatusInternalServerError) + return + } + + w.Write(json) +} + func enablePlugin(c *Context, w http.ResponseWriter, r *http.Request) { c.RequirePluginId() if c.Err != nil { @@ -286,3 +326,25 @@ func disablePlugin(c *Context, w http.ResponseWriter, r *http.Request) { ReturnStatusOK(w) } + +func parseMarketplacePluginFilter(u *url.URL) (*model.MarketplacePluginFilter, error) { + page, err := parseInt(u, "page", 0) + if err != nil { + return nil, err + } + + perPage, err := parseInt(u, "per_page", 100) + if err != nil { + return nil, err + } + + filter := u.Query().Get("filter") + serverVersion := u.Query().Get("server_version") + + return &model.MarketplacePluginFilter{ + Page: page, + PerPage: perPage, + Filter: filter, + ServerVersion: serverVersion, + }, nil +} diff --git a/api4/plugin_test.go b/api4/plugin_test.go index fef1ac3918..4dbed6ba4a 100644 --- a/api4/plugin_test.go +++ b/api4/plugin_test.go @@ -11,6 +11,8 @@ import ( "net/http/httptest" "os" "path/filepath" + "sort" + "strings" "testing" "time" @@ -36,9 +38,7 @@ func TestPlugin(t *testing.T) { path, _ := fileutils.FindDir("tests") tarData, err := ioutil.ReadFile(filepath.Join(path, "testplugin.tar.gz")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) // Install from URL testServer := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { @@ -270,9 +270,7 @@ func TestNotifyClusterPluginEvent(t *testing.T) { path, _ := fileutils.FindDir("tests") tarData, err := ioutil.ReadFile(filepath.Join(path, "testplugin.tar.gz")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) testCluster.ClearMessages() @@ -331,9 +329,7 @@ func 
TestNotifyClusterPluginEvent(t *testing.T) { func TestDisableOnRemove(t *testing.T) { path, _ := fileutils.FindDir("tests") tarData, err := ioutil.ReadFile(filepath.Join(path, "testplugin.tar.gz")) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) testCases := []struct { Description string @@ -432,6 +428,324 @@ func TestDisableOnRemove(t *testing.T) { } } +func TestGetMarketplacePlugins(t *testing.T) { + th := Setup().InitBasic() + defer th.TearDown() + + th.App.UpdateConfig(func(cfg *model.Config) { + *cfg.PluginSettings.Enable = true + *cfg.PluginSettings.EnableUploads = true + *cfg.PluginSettings.EnableMarketplace = false + }) + + t.Run("marketplace disabled", func(t *testing.T) { + th.App.UpdateConfig(func(cfg *model.Config) { + *cfg.PluginSettings.EnableMarketplace = false + *cfg.PluginSettings.MarketplaceUrl = "invalid.com" + }) + + plugins, resp := th.SystemAdminClient.GetMarketplacePlugins(&model.MarketplacePluginFilter{}) + CheckNotImplementedStatus(t, resp) + require.Nil(t, plugins) + }) + + t.Run("no server", func(t *testing.T) { + th.App.UpdateConfig(func(cfg *model.Config) { + *cfg.PluginSettings.EnableMarketplace = true + *cfg.PluginSettings.MarketplaceUrl = "invalid.com" + }) + + plugins, resp := th.SystemAdminClient.GetMarketplacePlugins(&model.MarketplacePluginFilter{}) + CheckInternalErrorStatus(t, resp) + require.Nil(t, plugins) + }) + + t.Run("no permission", func(t *testing.T) { + th.App.UpdateConfig(func(cfg *model.Config) { + *cfg.PluginSettings.EnableMarketplace = true + *cfg.PluginSettings.MarketplaceUrl = "invalid.com" + }) + + plugins, resp := th.Client.GetMarketplacePlugins(&model.MarketplacePluginFilter{}) + CheckForbiddenStatus(t, resp) + require.Nil(t, plugins) + }) + + t.Run("empty response from server", func(t *testing.T) { + testServer := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + res.WriteHeader(http.StatusOK) + json, err := json.Marshal([]*model.MarketplacePlugin{}) + 
require.NoError(t, err) + res.Write(json) + })) + defer func() { testServer.Close() }() + + th.App.UpdateConfig(func(cfg *model.Config) { + *cfg.PluginSettings.EnableMarketplace = true + *cfg.PluginSettings.MarketplaceUrl = testServer.URL + }) + + plugins, resp := th.SystemAdminClient.GetMarketplacePlugins(&model.MarketplacePluginFilter{}) + CheckNoError(t, resp) + require.Len(t, plugins, 0) + }) + + t.Run("verify server version is passed through", func(t *testing.T) { + testServer := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + serverVersion, ok := req.URL.Query()["server_version"] + require.True(t, ok) + require.Len(t, serverVersion, 1) + require.Equal(t, model.CurrentVersion, serverVersion[0]) + require.NotEqual(t, 0, len(serverVersion[0])) + + res.WriteHeader(http.StatusOK) + json, err := json.Marshal([]*model.MarketplacePlugin{}) + require.NoError(t, err) + res.Write(json) + })) + defer func() { testServer.Close() }() + + th.App.UpdateConfig(func(cfg *model.Config) { + *cfg.PluginSettings.EnableMarketplace = true + *cfg.PluginSettings.MarketplaceUrl = testServer.URL + }) + + plugins, resp := th.SystemAdminClient.GetMarketplacePlugins(&model.MarketplacePluginFilter{}) + CheckNoError(t, resp) + require.Len(t, plugins, 0) + }) +} + +func TestGetInstalledMarketplacePlugins(t *testing.T) { + samplePlugins := []*model.MarketplacePlugin{ + { + BaseMarketplacePlugin: &model.BaseMarketplacePlugin{ + HomepageURL: "https://github.com/mattermost/mattermost-plugin-nps", + IconData: "http://example.com/icon.svg", + DownloadURL: "https://github.com/mattermost/mattermost-plugin-nps/releases/download/v1.0.3/com.mattermost.nps-1.0.3.tar.gz", + Manifest: &model.Manifest{ + Id: "com.mattermost.nps", + Name: "User Satisfaction Surveys", + Description: "This plugin sends quarterly user satisfaction surveys to gather feedback and help improve Mattermost.", + Version: "1.0.3", + MinServerVersion: "5.14.0", + }, + }, + InstalledVersion: "", 
+ }, + } + + path, _ := fileutils.FindDir("tests") + tarData, err := ioutil.ReadFile(filepath.Join(path, "testplugin.tar.gz")) + require.NoError(t, err) + + t.Run("marketplace client returns not-installed plugin", func(t *testing.T) { + th := Setup().InitBasic() + defer th.TearDown() + + testServer := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + res.WriteHeader(http.StatusOK) + json, err := json.Marshal(samplePlugins) + require.NoError(t, err) + res.Write(json) + })) + defer func() { testServer.Close() }() + + th.App.UpdateConfig(func(cfg *model.Config) { + *cfg.PluginSettings.Enable = true + *cfg.PluginSettings.EnableUploads = true + *cfg.PluginSettings.EnableMarketplace = true + *cfg.PluginSettings.MarketplaceUrl = testServer.URL + }) + + plugins, resp := th.SystemAdminClient.GetMarketplacePlugins(&model.MarketplacePluginFilter{}) + CheckNoError(t, resp) + require.Equal(t, samplePlugins, plugins) + + manifest, resp := th.SystemAdminClient.UploadPlugin(bytes.NewReader(tarData)) + CheckNoError(t, resp) + + expectedPlugins := append(samplePlugins, &model.MarketplacePlugin{ + BaseMarketplacePlugin: &model.BaseMarketplacePlugin{ + HomepageURL: "", + IconData: "", + DownloadURL: "", + Manifest: manifest, + }, + InstalledVersion: manifest.Version, + }) + sort.SliceStable(expectedPlugins, func(i, j int) bool { + return strings.ToLower(expectedPlugins[i].Manifest.Name) < strings.ToLower(expectedPlugins[j].Manifest.Name) + }) + + plugins, resp = th.SystemAdminClient.GetMarketplacePlugins(&model.MarketplacePluginFilter{}) + CheckNoError(t, resp) + require.Equal(t, expectedPlugins, plugins) + + ok, resp := th.SystemAdminClient.RemovePlugin(manifest.Id) + CheckNoError(t, resp) + assert.True(t, ok) + + plugins, resp = th.SystemAdminClient.GetMarketplacePlugins(&model.MarketplacePluginFilter{}) + CheckNoError(t, resp) + require.Equal(t, samplePlugins, plugins) + }) + + t.Run("marketplace client returns installed plugin", func(t 
*testing.T) { + th := Setup().InitBasic() + defer th.TearDown() + + th.App.UpdateConfig(func(cfg *model.Config) { + *cfg.PluginSettings.Enable = true + *cfg.PluginSettings.EnableUploads = true + *cfg.PluginSettings.EnableMarketplace = true + }) + + manifest, resp := th.SystemAdminClient.UploadPlugin(bytes.NewReader(tarData)) + CheckNoError(t, resp) + + newPlugin := &model.MarketplacePlugin{ + BaseMarketplacePlugin: &model.BaseMarketplacePlugin{ + HomepageURL: "HomepageURL", + IconData: "IconData", + DownloadURL: "DownloadURL", + Manifest: manifest, + }, + InstalledVersion: manifest.Version, + } + expectedPlugins := append(samplePlugins, newPlugin) + sort.SliceStable(expectedPlugins, func(i, j int) bool { + return strings.ToLower(expectedPlugins[i].Manifest.Name) < strings.ToLower(expectedPlugins[j].Manifest.Name) + }) + + testServer := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + res.WriteHeader(http.StatusOK) + json, err := json.Marshal([]*model.MarketplacePlugin{samplePlugins[0], newPlugin}) + require.NoError(t, err) + res.Write(json) + })) + defer func() { testServer.Close() }() + th.App.UpdateConfig(func(cfg *model.Config) { + *cfg.PluginSettings.MarketplaceUrl = testServer.URL + }) + + plugins, resp := th.SystemAdminClient.GetMarketplacePlugins(&model.MarketplacePluginFilter{}) + CheckNoError(t, resp) + require.Equal(t, expectedPlugins, plugins) + + ok, resp := th.SystemAdminClient.RemovePlugin(manifest.Id) + CheckNoError(t, resp) + assert.True(t, ok) + + plugins, resp = th.SystemAdminClient.GetMarketplacePlugins(&model.MarketplacePluginFilter{}) + CheckNoError(t, resp) + newPlugin.InstalledVersion = manifest.Version + require.Equal(t, expectedPlugins, plugins) + }) +} + +func TestSearchGetMarketplacePlugins(t *testing.T) { + samplePlugins := []*model.MarketplacePlugin{ + { + BaseMarketplacePlugin: &model.BaseMarketplacePlugin{ + HomepageURL: "https://github.com/mattermost/mattermost-plugin-nps", + IconData: 
"Cjxzdmcgdmlld0JveD0nMCAwIDEwNSA5MycgeG1sbnM9J2h0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnJz4KPHBhdGggZD0nTTY2LDBoMzl2OTN6TTM4LDBoLTM4djkzek01MiwzNWwyNSw1OGgtMTZsLTgtMThoLTE4eicgZmlsbD0nI0VEMUMyNCcvPgo8L3N2Zz4K", + DownloadURL: "https://github.com/mattermost/mattermost-plugin-nps/releases/download/v1.0.3/com.mattermost.nps-1.0.3.tar.gz", + Manifest: &model.Manifest{ + Id: "com.mattermost.nps", + Name: "User Satisfaction Surveys", + Description: "This plugin sends quarterly user satisfaction surveys to gather feedback and help improve Mattermost.", + Version: "1.0.3", + MinServerVersion: "5.14.0", + }, + }, + InstalledVersion: "", + }, + } + + path, _ := fileutils.FindDir("tests") + tarData, err := ioutil.ReadFile(filepath.Join(path, "testplugin.tar.gz")) + require.NoError(t, err) + + tarDataV2, err := ioutil.ReadFile(filepath.Join(path, "testpluginv2.tar.gz")) + require.NoError(t, err) + + t.Run("search installed plugin", func(t *testing.T) { + th := Setup().InitBasic() + defer th.TearDown() + + testServer := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + res.WriteHeader(http.StatusOK) + json, err := json.Marshal(samplePlugins) + require.NoError(t, err) + res.Write(json) + })) + defer func() { testServer.Close() }() + + th.App.UpdateConfig(func(cfg *model.Config) { + *cfg.PluginSettings.Enable = true + *cfg.PluginSettings.EnableUploads = true + *cfg.PluginSettings.EnableMarketplace = true + *cfg.PluginSettings.MarketplaceUrl = testServer.URL + }) + + plugins, resp := th.SystemAdminClient.GetMarketplacePlugins(&model.MarketplacePluginFilter{}) + CheckNoError(t, resp) + require.Equal(t, samplePlugins, plugins) + + manifest, resp := th.SystemAdminClient.UploadPlugin(bytes.NewReader(tarData)) + CheckNoError(t, resp) + + newPluginV1 := &model.MarketplacePlugin{ + BaseMarketplacePlugin: &model.BaseMarketplacePlugin{ + HomepageURL: "", + IconData: "", + DownloadURL: "", + Manifest: manifest, + }, + InstalledVersion: manifest.Version, + } + 
expectedPlugins := append(samplePlugins, newPluginV1) + + manifest, resp = th.SystemAdminClient.UploadPlugin(bytes.NewReader(tarDataV2)) + CheckNoError(t, resp) + newPluginV2 := &model.MarketplacePlugin{ + BaseMarketplacePlugin: &model.BaseMarketplacePlugin{ + HomepageURL: "", + IconData: "", + DownloadURL: "", + Manifest: manifest, + }, + InstalledVersion: manifest.Version, + } + expectedPlugins = append(expectedPlugins, newPluginV2) + sort.SliceStable(expectedPlugins, func(i, j int) bool { + return strings.ToLower(expectedPlugins[i].Manifest.Name) < strings.ToLower(expectedPlugins[j].Manifest.Name) + }) + + plugins, resp = th.SystemAdminClient.GetMarketplacePlugins(&model.MarketplacePluginFilter{}) + CheckNoError(t, resp) + require.Equal(t, expectedPlugins, plugins) + + // Search for plugins from the server + plugins, resp = th.SystemAdminClient.GetMarketplacePlugins(&model.MarketplacePluginFilter{Filter: "testplugin_v2"}) + CheckNoError(t, resp) + require.Equal(t, []*model.MarketplacePlugin{newPluginV2}, plugins) + + plugins, resp = th.SystemAdminClient.GetMarketplacePlugins(&model.MarketplacePluginFilter{Filter: "dsgsdg_v2"}) + CheckNoError(t, resp) + require.Equal(t, []*model.MarketplacePlugin{newPluginV2}, plugins) + + plugins, resp = th.SystemAdminClient.GetMarketplacePlugins(&model.MarketplacePluginFilter{Filter: "User Satisfaction Surveys"}) + CheckNoError(t, resp) + require.Equal(t, samplePlugins, plugins) + + plugins, resp = th.SystemAdminClient.GetMarketplacePlugins(&model.MarketplacePluginFilter{Filter: "NOFILTER"}) + CheckNoError(t, resp) + require.Nil(t, plugins) + }) +} + func findClusterMessages(event string, msgs []*model.ClusterMessage) []*model.ClusterMessage { var result []*model.ClusterMessage for _, msg := range msgs { diff --git a/app/diagnostics.go b/app/diagnostics.go index 98a8f4dece..d5541b5814 100644 --- a/app/diagnostics.go +++ b/app/diagnostics.go @@ -602,6 +602,8 @@ func (a *App) trackConfig() { "enable_uploads": 
*cfg.PluginSettings.EnableUploads, "allow_insecure_download_url": *cfg.PluginSettings.AllowInsecureDownloadUrl, "enable_health_check": *cfg.PluginSettings.EnableHealthCheck, + "enable_marketplace": *cfg.PluginSettings.EnableMarketplace, + "is_default_marketplace_url": isDefault(*cfg.PluginSettings.MarketplaceUrl, model.PLUGIN_SETTINGS_DEFAULT_MARKETPLACE_URL), }) a.SendDiagnostic(TRACK_CONFIG_DATA_RETENTION, map[string]interface{}{ diff --git a/app/plugin.go b/app/plugin.go index 2a49a2ecb9..c1d67a44fc 100644 --- a/app/plugin.go +++ b/app/plugin.go @@ -7,12 +7,14 @@ import ( "net/http" "os" "path/filepath" + "sort" "strings" "github.com/mattermost/mattermost-server/mlog" "github.com/mattermost/mattermost-server/model" "github.com/mattermost/mattermost-server/plugin" "github.com/mattermost/mattermost-server/services/filesstore" + "github.com/mattermost/mattermost-server/services/marketplace" "github.com/mattermost/mattermost-server/utils/fileutils" "github.com/pkg/errors" ) @@ -400,6 +402,103 @@ func (a *App) GetPlugins() (*model.PluginsResponse, *model.AppError) { return resp, nil } +// GetMarketplacePlugins returns a list of plugins from the marketplace-server, +// and plugins that are installed locally. +func (a *App) GetMarketplacePlugins(filter *model.MarketplacePluginFilter) ([]*model.MarketplacePlugin, *model.AppError) { + var result []*model.MarketplacePlugin + pluginSet := map[string]bool{} + pluginsEnvironment := a.GetPluginsEnvironment() + if pluginsEnvironment == nil { + return nil, model.NewAppError("GetMarketplacePlugins", "app.plugin.config.app_error", nil, "", http.StatusInternalServerError) + } + + marketplaceClient, err := marketplace.NewClient( + *a.Config().PluginSettings.MarketplaceUrl, + a.HTTPService, + ) + if err != nil { + return nil, model.NewAppError("GetMarketplacePlugins", "app.plugin.marketplace_client.app_error", nil, err.Error(), http.StatusInternalServerError) + } + + // Fetch all plugins from marketplace. 
+ marketplacePlugins, err := marketplaceClient.GetPlugins(&model.MarketplacePluginFilter{ + PerPage: -1, + ServerVersion: model.CurrentVersion, + }) + if err != nil { + return nil, model.NewAppError("GetMarketplacePlugins", "app.plugin.marketplace_plugins.app_error", nil, err.Error(), http.StatusInternalServerError) + } + + for _, p := range marketplacePlugins { + if p.Manifest == nil || !pluginMatchesFilter(p.Manifest, filter.Filter) { + continue + } + + marketplacePlugin := &model.MarketplacePlugin{ + BaseMarketplacePlugin: p, + } + + var manifest *model.Manifest + if manifest, err = pluginsEnvironment.GetManifest(p.Manifest.Id); err != nil && err != plugin.ErrNotFound { + return nil, model.NewAppError("GetMarketplacePlugins", "app.plugin.config.app_error", nil, err.Error(), http.StatusInternalServerError) + } else if err == nil { + // Plugin is installed. + marketplacePlugin.InstalledVersion = manifest.Version + } + + pluginSet[p.Manifest.Id] = true + result = append(result, marketplacePlugin) + } + + // Include all other installed plugins. + plugins, err := pluginsEnvironment.Available() + if err != nil { + return nil, model.NewAppError("GetMarketplacePlugins", "app.plugin.config.app_error", nil, err.Error(), http.StatusInternalServerError) + } + + for _, plugin := range plugins { + if plugin.Manifest == nil || pluginSet[plugin.Manifest.Id] || !pluginMatchesFilter(plugin.Manifest, filter.Filter) { + continue + } + + result = append(result, &model.MarketplacePlugin{ + BaseMarketplacePlugin: &model.BaseMarketplacePlugin{ + Manifest: plugin.Manifest, + }, + InstalledVersion: plugin.Manifest.Version, + }) + } + + // Sort result alphabetically. 
+ sort.SliceStable(result, func(i, j int) bool { + return strings.ToLower(result[i].Manifest.Name) < strings.ToLower(result[j].Manifest.Name) + }) + + return result, nil +} + +func pluginMatchesFilter(manifest *model.Manifest, filter string) bool { + filter = strings.TrimSpace(strings.ToLower(filter)) + + if filter == "" { + return true + } + + if strings.ToLower(manifest.Id) == filter { + return true + } + + if strings.Contains(strings.ToLower(manifest.Name), filter) { + return true + } + + if strings.Contains(strings.ToLower(manifest.Description), filter) { + return true + } + + return false +} + // notifyPluginEnabled notifies connected websocket clients across all peers if the version of the given // plugin is same across them. // diff --git a/config/client.go b/config/client.go index a5a7a8c62c..3c1eca98cb 100644 --- a/config/client.go +++ b/config/client.go @@ -43,6 +43,7 @@ func GenerateClientConfig(c *model.Config, diagnosticId string, license *model.L props["ExperimentalEnableDefaultChannelLeaveJoinMessages"] = strconv.FormatBool(*c.ServiceSettings.ExperimentalEnableDefaultChannelLeaveJoinMessages) props["ExperimentalGroupUnreadChannels"] = *c.ServiceSettings.ExperimentalGroupUnreadChannels props["EnableSVGs"] = strconv.FormatBool(*c.ServiceSettings.EnableSVGs) + props["EnableMarketplace"] = strconv.FormatBool(*c.PluginSettings.EnableMarketplace) // This setting is only temporary, so keep using the old setting name for the mobile and web apps props["ExperimentalEnablePostMetadata"] = "true" diff --git a/i18n/en.json b/i18n/en.json index 2376e75418..778ce46cf3 100644 --- a/i18n/en.json +++ b/i18n/en.json @@ -3506,6 +3506,22 @@ "id": "app.plugin.manifest.app_error", "translation": "Unable to find manifest for extracted plugin" }, + { + "id": "app.plugin.marketplace_client.app_error", + "translation": "Failed to create marketplace client." + }, + { + "id": "app.plugin.marketplace_disabled.app_error", + "translation": "Marketplace has been disabled. 
Please check your logs for details." + }, + { + "id": "app.plugin.marketplace_plugins.app_error", + "translation": "Failed to get plugins from the marketplace server." + }, + { + "id": "app.plugin.marshal.app_error", + "translation": "Failed to marshal marketplace plugins." + }, { "id": "app.plugin.mvdir.app_error", "translation": "Unable to move plugin from temporary directory to final destination. Another plugin may be using the same directory name." diff --git a/model/client4.go b/model/client4.go index aac5fd4bf3..2dffa31291 100644 --- a/model/client4.go +++ b/model/client4.go @@ -4519,6 +4519,31 @@ func (c *Client4) DisablePlugin(id string) (bool, *Response) { return CheckStatusOK(r), BuildResponse(r) } +// GetMarketplacePlugins will return a list of plugins that an admin can install. +// WARNING: PLUGINS ARE STILL EXPERIMENTAL. THIS FUNCTION IS SUBJECT TO CHANGE. +func (c *Client4) GetMarketplacePlugins(filter *MarketplacePluginFilter) ([]*MarketplacePlugin, *Response) { + route := c.GetPluginsRoute() + "/marketplace" + u, parseErr := url.Parse(route) + if parseErr != nil { + return nil, &Response{Error: NewAppError("GetMarketplacePlugins", "model.client.parse_plugins.app_error", nil, parseErr.Error(), http.StatusBadRequest)} + } + + filter.ApplyToURL(u) + + r, err := c.DoApiGet(u.String(), "") + if err != nil { + return nil, BuildErrorResponse(r, err) + } + defer closeBody(r) + + plugins, readerErr := MarketplacePluginsFromReader(r.Body) + if readerErr != nil { + return nil, BuildErrorResponse(r, NewAppError(route, "model.client.parse_plugins.app_error", nil, err.Error(), http.StatusBadRequest)) + } + + return plugins, BuildResponse(r) +} + // UpdateChannelScheme will update a channel's scheme. 
func (c *Client4) UpdateChannelScheme(channelId, schemeId string) (bool, *Response) { sip := &SchemeIDPatch{SchemeID: &schemeId} diff --git a/model/config.go b/model/config.go index 740c92ccf3..378933abc7 100644 --- a/model/config.go +++ b/model/config.go @@ -171,8 +171,10 @@ const ( DATA_RETENTION_SETTINGS_DEFAULT_FILE_RETENTION_DAYS = 365 DATA_RETENTION_SETTINGS_DEFAULT_DELETION_JOB_START_TIME = "02:00" - PLUGIN_SETTINGS_DEFAULT_DIRECTORY = "./plugins" - PLUGIN_SETTINGS_DEFAULT_CLIENT_DIRECTORY = "./client/plugins" + PLUGIN_SETTINGS_DEFAULT_DIRECTORY = "./plugins" + PLUGIN_SETTINGS_DEFAULT_CLIENT_DIRECTORY = "./client/plugins" + PLUGIN_SETTINGS_DEFAULT_ENABLE_MARKETPLACE = true + PLUGIN_SETTINGS_DEFAULT_MARKETPLACE_URL = "https://marketplace.integrations.mattermost.com" COMPLIANCE_EXPORT_TYPE_CSV = "csv" COMPLIANCE_EXPORT_TYPE_ACTIANCE = "actiance" @@ -2201,6 +2203,8 @@ type PluginSettings struct { ClientDirectory *string `restricted:"true"` Plugins map[string]map[string]interface{} PluginStates map[string]*PluginState + EnableMarketplace *bool + MarketplaceUrl *string } func (s *PluginSettings) SetDefaults(ls LogSettings) { @@ -2220,22 +2224,14 @@ func (s *PluginSettings) SetDefaults(ls LogSettings) { s.EnableHealthCheck = NewBool(true) } - if s.Directory == nil { + if s.Directory == nil || *s.Directory == "" { s.Directory = NewString(PLUGIN_SETTINGS_DEFAULT_DIRECTORY) } - if *s.Directory == "" { - *s.Directory = PLUGIN_SETTINGS_DEFAULT_DIRECTORY - } - - if s.ClientDirectory == nil { + if s.ClientDirectory == nil || *s.ClientDirectory == "" { s.ClientDirectory = NewString(PLUGIN_SETTINGS_DEFAULT_CLIENT_DIRECTORY) } - if *s.ClientDirectory == "" { - *s.ClientDirectory = PLUGIN_SETTINGS_DEFAULT_CLIENT_DIRECTORY - } - if s.Plugins == nil { s.Plugins = make(map[string]map[string]interface{}) } @@ -2248,6 +2244,14 @@ func (s *PluginSettings) SetDefaults(ls LogSettings) { // Enable the NPS plugin by default if diagnostics are enabled 
s.PluginStates["com.mattermost.nps"] = &PluginState{Enable: ls.EnableDiagnostics == nil || *ls.EnableDiagnostics} } + + if s.EnableMarketplace == nil { + s.EnableMarketplace = NewBool(PLUGIN_SETTINGS_DEFAULT_ENABLE_MARKETPLACE) + } + + if s.MarketplaceUrl == nil || *s.MarketplaceUrl == "" { + s.MarketplaceUrl = NewString(PLUGIN_SETTINGS_DEFAULT_MARKETPLACE_URL) + } } type GlobalRelayMessageExportSettings struct { diff --git a/model/marketplace_plugin.go b/model/marketplace_plugin.go new file mode 100644 index 0000000000..154bdde00d --- /dev/null +++ b/model/marketplace_plugin.go @@ -0,0 +1,69 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See LICENSE.txt for license information. + +package model + +import ( + "encoding/json" + "io" + "net/url" + "strconv" +) + +// BaseMarketplacePlugin is a Mattermost plugin received from the marketplace server. +type BaseMarketplacePlugin struct { + HomepageURL string `json:"homepage_url"` + DownloadURL string `json:"download_url"` + IconData string `json:"icon_data"` + Manifest *Manifest `json:"manifest"` +} + +// MarketplacePlugin is a state aware marketplace plugin. +type MarketplacePlugin struct { + *BaseMarketplacePlugin + InstalledVersion string `json:"installed_version"` +} + +// BaseMarketplacePluginsFromReader decodes a json-encoded list of plugins from the given io.Reader. +func BaseMarketplacePluginsFromReader(reader io.Reader) ([]*BaseMarketplacePlugin, error) { + plugins := []*BaseMarketplacePlugin{} + decoder := json.NewDecoder(reader) + + if err := decoder.Decode(&plugins); err != nil && err != io.EOF { + return nil, err + } + + return plugins, nil +} + +// MarketplacePluginsFromReader decodes a json-encoded list of plugins from the given io.Reader. 
+func MarketplacePluginsFromReader(reader io.Reader) ([]*MarketplacePlugin, error) { + plugins := []*MarketplacePlugin{} + decoder := json.NewDecoder(reader) + + if err := decoder.Decode(&plugins); err != nil && err != io.EOF { + return nil, err + } + + return plugins, nil +} + +// MarketplacePluginFilter describes the parameters to request a list of plugins. +type MarketplacePluginFilter struct { + Page int + PerPage int + Filter string + ServerVersion string +} + +// ApplyToURL modifies the given url to include query string parameters for the request. +func (filter *MarketplacePluginFilter) ApplyToURL(u *url.URL) { + q := u.Query() + q.Add("page", strconv.Itoa(filter.Page)) + if filter.PerPage > 0 { + q.Add("per_page", strconv.Itoa(filter.PerPage)) + } + q.Add("filter", filter.Filter) + q.Add("server_version", filter.ServerVersion) + u.RawQuery = q.Encode() +} diff --git a/plugin/environment.go b/plugin/environment.go index e4410cf26a..3a9cdf7ab0 100644 --- a/plugin/environment.go +++ b/plugin/environment.go @@ -18,6 +18,8 @@ import ( "github.com/pkg/errors" ) +var ErrNotFound = errors.New("Item not found") + type apiImplCreatorFunc func(*model.Manifest) API // registeredPlugin stores the state for a given plugin that has been activated @@ -162,6 +164,23 @@ func (env *Environment) Statuses() (model.PluginStatuses, error) { return pluginStatuses, nil } +// GetManifest returns a manifest for a given pluginId. +// Returns ErrNotFound if plugin is not found. 
+func (env *Environment) GetManifest(pluginId string) (*model.Manifest, error) { + plugins, err := env.Available() + if err != nil { + return nil, errors.Wrap(err, "unable to get plugin statuses") + } + + for _, plugin := range plugins { + if plugin.Manifest != nil && plugin.Manifest.Id == pluginId { + return plugin.Manifest, nil + } + } + + return nil, ErrNotFound +} + func (env *Environment) Activate(id string) (manifest *model.Manifest, activated bool, reterr error) { // Check if we are already active if env.IsActive(id) { diff --git a/services/marketplace/client.go b/services/marketplace/client.go new file mode 100644 index 0000000000..9b03dd8c50 --- /dev/null +++ b/services/marketplace/client.go @@ -0,0 +1,79 @@ +// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. +// See License.txt for license information. + +package marketplace + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/url" + + "github.com/mattermost/mattermost-server/model" + "github.com/mattermost/mattermost-server/services/httpservice" + "github.com/pkg/errors" +) + +// Client is the programmatic interface to the marketplace server API. +type Client struct { + address string + httpClient *http.Client +} + +// NewClient creates a client to the marketplace server at the given address. +func NewClient(address string, httpService httpservice.HTTPService) (*Client, error) { + var httpClient *http.Client + addressUrl, err := url.Parse(address) + if err != nil { + return nil, errors.Wrap(err, "failed to parse marketplace address") + } + if addressUrl.Hostname() == "localhost" || addressUrl.Hostname() == "127.0.0.1" { + httpClient = httpService.MakeClient(true) + } else { + httpClient = httpService.MakeClient(false) + } + + return &Client{ + address: address, + httpClient: httpClient, + }, nil +} + +// GetPlugins fetches the list of plugins from the configured server. 
+func (c *Client) GetPlugins(request *model.MarketplacePluginFilter) ([]*model.BaseMarketplacePlugin, error) { + u, err := url.Parse(c.buildURL("/api/v1/plugins")) + if err != nil { + return nil, err + } + + request.ApplyToURL(u) + + resp, err := c.doGet(u.String()) + if err != nil { + return nil, err + } + defer closeBody(resp) + + switch resp.StatusCode { + case http.StatusOK: + return model.BaseMarketplacePluginsFromReader(resp.Body) + default: + return nil, errors.Errorf("failed with status code %d", resp.StatusCode) + } +} + +// closeBody ensures the Body of an http.Response is properly closed. +func closeBody(r *http.Response) { + if r.Body != nil { + _, _ = ioutil.ReadAll(r.Body) + _ = r.Body.Close() + } +} + +func (c *Client) buildURL(urlPath string, args ...interface{}) string { + return fmt.Sprintf("%s%s", c.address, fmt.Sprintf(urlPath, args...)) +} + +func (c *Client) doGet(u string) (*http.Response, error) { + return c.httpClient.Get(u) +} diff --git a/tests/testpluginv2.tar.gz b/tests/testpluginv2.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..1e0693fb51a5147f785f95aee2c1c86f29fd4e94 GIT binary patch literal 71989 zcmV(+K;6F|iwFRID{Wl>1MI!~cN@o%F#7#!o|FG!q}TxyP+f`3u%IHrCc3KiYWm~5?a|Y<)u&IN zJc0RtvbOQ$_xb-V{=E6;AK}k=mfmF>fR87S9@WqP`lF{X|Bp8|)`0`8ufhC3U469r ze}dJ!>_pw)|Ig?Dm!$yy?kD~Go#4JDhVJ?L{WgC}qfuP@u^;EXNiv=$S&C))`Dxxi zb-pu4flu5Hy*rtu{bAf4N7FN`BTUBu&jo-C?^C6#rK_d?ZZ!R`{j1{tRa0@BHs-DW zwYBw)$7_#${=dGyzOnK9`u{EdRvysbO3)n4(jKnk<|ymWhH>5sehF4q0-%X;)cf=@ z%cl8s5{;Kkjc?%V`?EOU@DjM9EEo=t-U{+CX(V0tFV^;$>t zN{y{KT|kU`olZJ|er$UmhMiy#4fEK4D?i4Ia28EQS$Y{HBgXATGCkuJkWD5y?p~HolKG!9PV=`^1B-PG>MTwc?QzS|2!yC_h#1$xV0 zoxptVRRPNU+O0yhZ}x~LLhktfBpb(*>1DA+zsfg&$U$;Cn?xtWxN_-#6Q{FLEZ=cW z6h6ZUIx-CJ!&Ph3V-+sF*P$H1yov|WY&hMaP8WzH8-cW#oFqe}8zPA`OP6&rgru*T zjMSYBu2MiFvQ(LLT_?ex{-n#d+l_N3?cljLUei@Oh^9krT>Y@o=TD6zpuC_{3xS(p zEaYW49@#?OO0sM6DB3|IY8Xn}tl&)iX>*Yo5)vCvvS~KG9LM2Vls`>>vSfvdN{{R$$+j 
z5tP}3e5jILs@g_LY3=EnJ84&jyWjrL8B6mJeg~_8yekWWZ5<7+mL(W@ewK`ASFce+_9r4B0-~Lz9}FhhD7ZKSLFgP--svzqiH0bhjVEyq z2o7=-fud=EYDqAerD;6L`At5Zoq(8xu=-irm_i5Q=o5T{e&t~hyaXxgA_n0KS^+`o z0>oFAszBnxiR(qv-Wf}Qu;<~I9ARO(F;syMAo3=#>A(aXJx*$S$W_h2)y=aEJ>*R8U+J^1CA7`LGkCK1I)Da-%2>y*z(7xh4 zTq0aDq7LHe`(zYnv+1Vuc6%5{6Z3w_v?c6@E5SEpC8SPI&tR5F;M|!^vPrX%8@RB& z<-l11k8zyJ;Q9?9!mAQox7nZ`cF!$V58cq50}AHH8Vx{fZz3xIQOoJ>*49?g(5j7= zQy%|{SOdz-9TaM@t00E0@A|q$ggOw|Od;gdjNOhb4Cre9s69k3GH!2zw!HcajlEF6 zV%l$lfJLEQEnNW#v@E&7dY1x7BMKN1Q^xF;JHc)rR#wt%G6Jp=r{@W98{jI$ukr{M zZ=OtN5w?|^^1`ewFl@UWthU^#LQ&SE#*2t}LUbM_Lll~V6Mh*CBa$hE;>`f6M^L1W z;MZS+d&QD1luN3|x**jrG(?{oku#pb%BBD0fW?!ne@Tlv?@i(hV3;JCIT%3%q*<^V z?u5Z8>i2=8m5^NQZ=oMdQ$SL-AAYZ-XCJ9&z;GwTN2y6{9#sc6QBqHd^~9h6KyP#D znKE+_adWO9dyr^aG~OPmHUZPo@X~9(W+W`9q_g3$9e9JP8<-3w2WcI3)aWET7b3zQ zB)F|a0ql`EE)3ua)O*?pN7(bT=sd3IKJL^F*b;LP1<$ha0Qz$oWV4BCQbx1Npzh=% z;hpdz84iV=M+z`Tp1%0VvCXQH3xO@Uy9z9Gml-ri(c}|rNB(l(F0w(Fuie3t?^Sf7 zyy!h<{DsTDG?J=~)vo<*Z=A6D>jr)U?!Vxee>*NvMFJil2!0bra7UcL9gWKiyn)pY z?tcU`2#ei40D{n7<)&NInSL@D#1je*$Dg6(@|r7U;{ehRNUbYX-;&=IC*E+`$JR}3Z=sH;ie%wj4?eWd}~&S10u ztZ9!rO06^v!?-x?s$Z+PjBfop&!H;kL` z#^at2>a^Bo3BgD}`i{T^&4&*iB>p)lfUTOq;`d_nhZf*b2;*-$q;1jU|Eiow{HsF( zS1pVh*;EOu4k;ElYndj}R$L15bB0Nq_G&x0k0P2e`CX?v02`^@}RZAy> zA&>(2l(uu5m`!;&YjSFvI+nEIVjeFw+|l{agQB2Ot3XPFfmmQlcO>_F?bHg zv;3@yo!_0tBbHkcP|NADi*efJ*KqH?SGfc%iQ9!^*A<&g&YuOtk$wVLMS+8P zdu|t=+aExy)y|VVgx5SJoAw=*qJhJ|IEx6tvA<1bsW**9fMGNq4=-hVvk;LWHVr{c zlPR55HN;BPu%G0OS-bikWH&Jv=^McBxhz0^rF$SiZfxQn6pzN!OS0Sm;Z8G<%(4V6 zbEom=aggNsEY6FSjaABwG0n0uj?JpI;1zS|TqMiATw_^b#m!9H;gDlka=b%~BatXy~+Q=bz3XtiAT|C~(kplB9iM z8N~|KHlkw1gsnvRY>Wm&F%da^>Rt4U)TljatTY0LT6$;wWa5>D=WF}HwQ72yg=V8D zcX^xJi&Zj3FNr<%s)gq!u<)<9uvVXIqS^I6nhn_)Nfzngzm^I9dksJ)N!E=BAT44ucnI04p!SVS*%j zI*HOLf`hW-Xi5?$cf1#&&DCo**njbU=k5CguT2uNO`9^zl_j1+g!}>J;1qV21e$`W zMy`?rQqTmA2l#9izC=M9&<-6M;dj_Xax8ih6JJyG?N73Ev<>HD;84?n;AnvQ1WZkq zf<*pZjyN^6*P^opvn?SI06P{qNy>GRz(GwQ6(K@fc=m}8CFl`42WruT@DAh#Lxr_w 
zXVe*A0Nu``)0oBn6Q(;gh(n|O84+09s8yyWf}%>mv(SnSorHOxfhIQ0PLm!fShT8G z7x3YQjC0(+K`l%AfY;ZI6K4VYUz$Ad7V~c0Z=IbZe7pB*r@Qm^r{Er%OKn49Fui#r zW>=TF!u=e{o%DOVm6sTc#jGr9P~B2ipbV~cavP57eYNR{)0O~1rf2GJ3)#R%57W^O zdhMVO+yq`%jZsHJ!ZB$m29PV2QO7nH94wDw%N_0;5N!qtkW2iz7n5sXko8E!>~97$ zaW25~+mROR$LC_FNf8G@BX}4zP?*D#sG}Bvgf z#Fr8Dq>u{HtDCh0WyHY#JRSn~Y_5Elubj3kq$CpVcxNCvQ=vb6Xd1n0wY+vh+%Qq1 z*)^o|Q<4p7jW?7)mTd6>aGW@QpY3O3mVC)^OAf_)XbNG*U`VI|%K1eaP(5ciG*eHC zTRG+&AU9na^Z%=Ekwpy*i2D!fI5hN+;*(9z!T2$jhCuppPkHb}x2b^=1ZWrnS zV$<2^1Z2<_zaV%lJmy1S%FF#EM_be&oDC8;KH8^YTSGIPFdk%=?5ZSORfWh=JPi%9 zWgxSh52XQwC?z-o;Dqro8xt3FI3l|$P5>w@Erf&)40#y5L*0PXoHN>EPw_WRBXUo> z<@vB^i~bbHW1Ty~8E?_ZS_j#12+Y7>lK^syc8}IEZ}~X=l6@F^3XATefoOQB`@XOb z8(qK4wvD2m-&R)7mIW&H14!Y|xdN#AK_^&k!TS6ZUoz9AUw@|W{NNf2h;>tml9d48 z9(i23dJ`d+P1(W#6Edz2ZFh@rCUduvE64VrC&!@PX0Sd8`(_7sPK~8NNX)+}9jrY8 z03Nx#yc~Rthds3Q0tNTulNnRqyEy7i@v|*UB+q*%o{7*llafIj+dYrdBt|`1eLPS~ z#RbA*0`iI}EKUm!nuG55#V2++%oB1ojc}La6@`_QHmUR^Rx5Q;Fe?mIt?-{o?I-aI z{(DiTN_kZl!)_rX9@heG2VoeJUFq17A5FhVR;rgG3;hWbB$vHGv++7g@EBt|8;vu< zB05sN2a*ALY*DvYi!?_Yufq5=be?6(`0%!JApBbR{k~2V8`fvT0zlh`R9CvH>s6pPyR^1 zpbVmGd2Q|S)5qVhu0Nw)K@Nv1Bs1Yg7eIhO`S(0tgf>zB5Wb1XP#WOBfVSsap%WPQ zpW&dQI==QQLsfjY6jVrcXqz^o9W26S#1EA01iFni8x!^OPVzns%ViFnGSvBUk$tDg%iXsXoq6e$`1tO{e!0X) zEnz9qcKkU{P>23GL{uE%0cz8EL}Pu+fw7OMMM;#w9U8nJX`w0b<9J$VN2sOaU_@U^ zL!F@MJwrdBVPCFk;ljxJ;Of<18JdBT$l%)O? zl>T$p(NS`OHEZbr_?`^R{5nKglw>`_AFV`)0HCBaYxV$aE=l#EQ!ar3VRF*}Y7%Je zWEU1bk3~q&y(T*{j6*971Elc;6dI5X1l42EEM~^7A90bx+KJg6L}!n{xif`>W5(-` z(eZ&qZf~2{)=x-OOh)7|oM|WYx(PcT%UJbV3D)cw+J=^Sq`(lGF!Hv*XlV=NQY>&$ zj-F0tsCVX~fAQdZz@Jaa>@`BX50jHgG`X}*(IP3#AWR@)`g!26;_a4a%6jc_4l!uC zLJsX%XLQdE#Apz+0fXCafgs2)5|Vj~a#z`>6a@0sjxuu;T(^q{(o|-xP$sG>v|cO( zefP>;mA~WIT$dRsuq~n(`+%5Xn4(wuP$3qL5v=A=+G zYv$#s+Mi|pT$HaSIb(G!`?RFXZeBjgB2}rxK|&s}4+LQRwCrl6Hnlw@XJm-sciVe! 
z58l7~u>F4T-FKmU4ZnN);?2%?X5DLd?5wIZ8J;JT=?sV$Ic)JAvDuccN5^&SW}UL< z$WFJcit6kO?+{;xpW{KfK2RrGwCOhWU0b*=c^()!8xQGFQAEfm$#fKrm&`87YCiQJ zX0Znx&1{aWIM63v&gRnre0n6Gm=??@+}fUPDyM@knJ+J~sVAF;3z-ie;lsxIrePlD z!w2~Ao-Ieg)2Fm z<(4gHN~SbZo*-Fuqesi}X|LbkXSWs&qm+f$I+^XU$ZQ})cVF$j>Au=|z58~jP6||6 za+>W=l2JnYWOdPvr&6?!)KV!x!)SHs4eA13{eXgdFNo~-A_BOP`cVjIRRG%Y+-s`8kfkxK6JZik@B8tTKNsF7=)US<6aUC zv3q}uFU8_`a7Nmt^X*+caKE|EYcRf_@Z=wgH2Io0FW4KN%y5OOt(t+hg9ePR(el+2 zbAwvY4$n^gR}wG?ShWuX%WXQMp@0D)kbi|qP823%t~gwGJXBq8%`3~PxWy3KD{9DB z71#she`V|esoWlW1mriw9$F9PV2@t{?y%3&++V2=h`cc{_z{zk`gHXc4+lwjH4+RTxe3!6s-r56e}TUe3y5EV?f8Tp7Ta@1`-DE zCztAb+T**?Q}v6=XG8IC0p@#baCi)i8mlm>VXp~x2Xq*wcb5f_Vb=tL1QZeygp>Eq zl3}01CD4&fn?U4e*$m#I4F@|yXK(g01_9!3H}x1`Z~~g-$^;2!S+KZFlhBZjA_kPG5g;~z2GIndy%^?M8^Jk7 zLpsnk&Yc(85168KMZ@e;G&US^LX_D>CmCUICkM->_EnE2~s!E*uof(?Q~k;K4M; zP(#t=@&P###uN5wn4%g*&KNmfxtqknF-qdNkPwiiPO>5+nvXM-dXM>n9t|MmX_H|D zY-7(JG~PNs3E=T&Py8(xUTIK8Pa&BjC7A)S^dnZEl5Q&*XN4hz%Tbl0$2A8Ugmn zrD{et(pj4Pa~2OTB^1`+l6AbOMEC?n*H))OLuoH*Loa8^aJtM6)ySKYSYo7{#5DVv zge-FaPs!PX21?JQxO~!`lr)!#GA$ZuhySsSzKeU=M4WMq8wl_o_?6SmBsyxrHNZY< z=n7<(H2if5j~$&f@wmCMoGq~f#o3uomoVZRE;m+30o9|~luol~iAi+5EY<4hj{TWQ zBS2FNb~m5mF{E9nv`Pn{fd7lv%*9z|?p}r7e2d;KYgH~pN6>Ke=}j~a=wc8+xa2#^ zqcNj|{Oj!r;Ali1hUC;#=M~7!DoVTDW4Xzs9m@C=gF!&x7Q@jQl2&UO5T z2Jx;L_5@F@TEZw}-et&J8FwE8h%G&w?Q2>RX|?h5`#~!p&?fPimqD@32BR_rTk6Z68MMf z^NDLoaKlWmw2t_-SwFlPZq6&?ozq#vf_!D0v07?H;I%Q;9NAYlYze%13Vc1Kjf|v$ zgE%%0Z38jvhT_?lWUMo+^s`>RGR?-zL*%W?(Krdurla8>Cyv}^HcJo|+;$6MU0G~U z+YISC6TJ#9QPIY4$pvIdc2Kr0vo8cKa@dF_&=%3VR>>__7jF~inl`?fkS6NBk!kl{ z87CBaDO)Xf7TqcEb-{L3G$Xn$p#!=%p%%3w?NnINY-Jum0~I@KRq|X`rTg-=D^%>7 zoj}{g4wIvhQ;4CMq1tcq<~k70LZJB>$qjf%uA=)iB-oJz(Xy881pH=+6TXCiMT-LFO)Al4?xcM>#xW?{XUERgmG@nDO>)X4Xjxx zfo+xbLU)jaVrvkyuGlm=TScEFM!!=Y+N5`>JTyL}pHk4+8sd{}ow8(nzVF!0&I23| zFAuDTFAH{28*UkF{X)sR&~v1yU-RMqo$rbuyTn=&K1e z1GX*J2{meZEDAMNgc+C;^)eB~a?p4nSbLLs#g2_GPSht>2?Z&_yW?J`>UT1gt%%{E z)`}Yn8CPkyF{(uv?1ZObaQx@jSr)AP5E?3n=i_k6YU!e4a6nR$zxu(kBsmq>RkqBFt5S;y;pmk 
z0I8a<6ebi7Kt~JZ1z|Ecvkc8q9(`pB^e1Tu`}d0!Bs$WwibrhOu)m{Shb{YWx|VU; z;n@nj-a@gpLUCWGs_nq}Kr6_3?(%B#-l%QOc_(%@=anulJ6hW@Ux65ss4`c7-~h&I zwu|KKVj4|Yr_2(zJ>SXV!|v286!^7DpD>iPxJt1%NlufL{RGD>toE{TQqm{HuY+!H z6m__Tl@k`^wYmuLRw0S)^a}?#KsjE6I}>nSWwF?#-H$38iZ_G>gy57@hh!Odh)W*n-P z6f|%%cZ318z?%WJvF~`chFFCTWal7cC!m3(0yh(Xo7h`8w^i{FyJ(F^`zapw4B0~o z`e4pH#?i!BbL(`%z;v&6)4XHlu}*OlydfDUVmA_DwJeF+TNAOv+{i)Md9D;{gQ7@V zTf-gqf4|0PNu^GBM#$dS?$wFbb5(;cmA@+kRxhsSR!e`sYrMg~k;c1-(pe>~$DRbT z(7Bx_F8mZvFcuAq0CIY1PokZJ=(`XGlAO%8Rva*=mCyCOT1`ptNrw9iG=!lquY&iu zufTjBbD?AID1o%+aC|U2Nc6PR8%M(IwMyb(n4W{L-e3{eV$>Mq!)>V zGyk#!-8HDf$1`=RC2a-W3G41dd|%}QUVAm6NI$X!HS^Mvb1NVm_+@NYnrm7E*QR^1 zp%yB?*DFdzhkgE9>5G25*gYi6P3!LwZ#-%>8yBDQhM;?v)cV@5N)h&zslB=!@^>!# z;b5nG@b@<__w<@CtTC0jr*S;8buvKiB?s z5EpEENNFm#V-9@p1wtb8!Ud6;P>!|H@_bc$Q=Q}-p3d3-tNBrYbSWM z7rfnjAB>`OhT+joMn^WIF!>oQ(Z(fy60{T1bv7I5E)2ugG5bKAT(W6}Ldg(X^Gi@l zMhJ_79HP?H#nbUFJ6}+QjF$(HiJS2BR>dP)D|HF&uQsUESm?N8=0JkKN zN&CA_2)@2!69b868?ZR7mf_aS!JQNR$3)2!Z$28=A6cn^!95Hbo56XKC*uBy634Xy z2LV~`#|U9U0>^WM%Pj>>oiQ>TMiAN$@lsEE0YUe6C-bEg+;f+O1Jia%5f6aM_G4n zxwNNwR;zdNMWTjWC(b360&h|fcAw)LBjx18q z;nxQKmJ>Y?fb!;Ei?@4xsF|}TDsG^c%F7u#&es)P$BV0Fq&W@ZCU0;kJ#f+ZEx&8K zjaA;^_Ka#?D^)UEaHm zyl3bD9Yc5A$d9<;dxTNG7h^*gZZ2s^G?|U}610kI;8E9j%DZ?!$zkv8qd(|)HqAGY z-*P6wfKn{Rg8^ng2vwq_@R*Zi1luC}&*HwH^pgx7#(N}_qOc&gVDefu?Z#=JPJMFr zTP4|&1w*~$a>8*3BXVx#bB6ys2ta=QgrY2N49k3#0vm+N5ztvLi$ddZUW`C$+xCzt zF|ECqAs#gHQ8MXr#TnSFFa8=`1FP!rO`+DDs$^}dswrUtYAe3QU9zeo9YV}&5ht>I zugZzYMqHw{{tJ;j{rETq>LenpG(Z*OnfkowcE*{a=DEbO#WJ78Eg>g5NYXxtiU--` zY?d$mMVZB$-S zOWy!%(xkmkpZtr2>_YG%kKfYW0;1Dz*dX`~b^V^#KKJt*b4pL%y5CSsm{Ir@-#Nly zs>Z|n(sfyePWCx!em2A(KRq*1i9&&)hG_tTR`f|WN@k-lo1CtUSI(bl499*vjS|oe z`&*CJ)}AeK=4*7NUK2e?HsnTh(gS2vg-~>(Cvv{rNsg0%Kn@?HF|N_aNOz2fN}9v7 znY>9gT8FEA($sj|xCTo%9pT36<0sAt*Ts|en{TR)G1gYi(Zx4BnDxU(7d1@ANQeiY z+h@^a8}|8T(!y~j+&gZ9W@x=`mDv%mw?Yu{oY-Ck;^3y!BZA#hG&1z?XV{{e4L}q* znys#_Z#;VZKR?kJ?{VRB>! 
zgQKyp&7hoD)Daq-jL_YHrdg$0bhr5j$^mkISBMrdfGg_$7RTcpJpy{4lsSlZi}3+6 zxpuq{h#vn6<;7X1?uisF9oUWeDAzh8&g5e+IP&Fu<5$SDu~HkE2PI+Zc;MfQrAP8~ z`Gu=#yL3Dc08O^Q4hc?k&T!opOJelh9^l+M7S!*gz7VO`#bc6P?S>xl_UIh;*l%AC zvy0cs=QnXxG;%%nSQ$Vml~mTBJ*L`oqEdgZiAsHCuGdu!$DGc)DFL*jyE}Z=Gyo|n zC_{IYUiuu&{@RjZ=+>z{$c44OX?4e7i&4x>VL31 z6Ds3>Sh~Up=?ILb6h0S#4k*Ox@(SQF{Mx&l6z?yh9TAc7^@M&sg_7Q4d{s0#%ru}0~)hEQ%$+8 z4Peirv_F(nRg9?{!g!naBswvWJhVk!Ezu!PdXZ`tK0<*mJjF8zVDLnRyz;iQc+$h` z-bNN|EqX3QXD6;EUpyJQza8=E866B#I23>p+y|8`nO=W7w7zhlQ(oE6mQ~UR(GFE{ zI@c(VrU?x<94G#^FVwb^d|!97>wSu?F%D*>gEVWdjYB$QU z^4AwWGAZZCszB41ACXA51=C!|H zckZ>e`t)XJWE?ft#LKVr2ij;g8(K!4g2*UZv~&6aQLjrQfR3wtQCc(9bHLC~ad3>% z*|EJ!)1kxkgpQa4H@qn2U}q0iF6lamCB12}hkAu(66I%YOzI=owDYW=jV?R5ivGHd zOGnPvrO6uSF+>cPXqbrpsD?SXcM?|P*CF<(@#ls9pH5sh7Ft&J{_Q^N6$xvD@zzKTKHGYQegv0W68uOBWaGB-VWXLG=P%I!R z41GHN$jMOl#D9&7qdSKr1Fq?ij=2t8zl0O55yC2x1mlGdcrFnEgHO;vH=(!*SV-_5I<1Y3ioEXB-4nWTa*82cIv5U=B3snmD0g1r4rWQQM|x z$H+DPesI6rrN!Ir-lwB2YkPDyAR1QzqR{sNx zV|7(Mfp_3edq%s^9DiDr#1?d@y@C4zx68Ih@^W>ax3C?mQ9bJ088H@4Sak3oFWyns zpGLPU*|@sg&2G1`={X}ywi>R%11h|d+&MSF5c&OD1ldP{Re_m>^-$6%*1|`@`)n8X zs?&JF;zyhe)hh*m^C&_2Cs#dsO%FGTQ=a5+qqn>!9^$9x!73*Bffql(ujc~2Lv6R3 zq+L{u>~=}2WVmY^Fy4awymj}H*%u7VzHw{cG#V2O$)*t?XAPFnW4<7TI-LmVi#Qfi zoG_yjqb9#|@Pq=U>Y*paC+jDDRM$)twcsy*adR@DYP>`N-uO0W9%B|lbGQ*Ka}={> zylW}#Nk@j4cKQ0wLBXr^*?J|$kNvciFr9;P`!XAt1ex&gEb*Oh{OA+;m; zaf(ZJHl&o4)DYOLUQC;-ElS$eWs0CrXm6BOSrlLp>g2h5~3p3F}bU3)eqt@WEB^0nc+#w>r@{t743-fQK`` zFBu7qdzuxyN&n(jv{cbg%54-1Oyn9hZeV!1V>G7Jy>bqOQI|F1uh)n7GIm+;)R^JX zg(fHzv-FBnoZ-Qv6J}BYmwdyVk(BdlxJ@MKKu*JCa*n+yDE@40!z-`%IvCzI7=V+e zeH;5vHveN{5^^VYJdBdmrq!WHG87hBcM*-+!ncGFQdQ?DU~w92AlKu=LqqmQN5?n= zY;7}vswGbt{v5>^z!>!JE`K4H(be!-xJu7>JNy(+_kN5()bQgLbL?~pZ99w2JcZ7+ z;M}tHdk_S4-VjFQC4w)mq;NM( z@_hs^n>?B^pk9j#f%vucg9bXSEPlAw4%XYj#?hh`e!-KyPO#bz8khJlpXO;zj;UT* z(KjKJnWm!Vigoext2^HRBnDM5qA+RRjGrCekM5(@SA!%xSUGBGL$H_Lw23m~pVz>> zEzAB2UfL#_=+%-597Ejdrah|KDCYD@B?oD)U`;FCpXRIaY`ah&b{( 
zbm_7ziQMkS#P1FjU(=ytW$C7l61C~6n#IfC1yUuFv{@L+Z%TO?kmpdD)z9$ZsktRt z8!^HQ7_nH$2nh>P4jkcqxITP@28_q>_m@c3Knux~E`3mP1QcJE5k`JYSw`>%W;Uj{ zKS>I_1$QYow7QewAP{$1lWD}l0^tucjc5cc(pLA#=D$sf4I@2ohCnsL878_ z*n^$zcRTO9ulBlc_uhB+UmP5C-~Y$%L3i(6_wRcjx*vCc{IUCTr~7*M-Oj5ic_zIJDOICnZ9; zMel7ux+i#Lyxa9xUWekevOk`iAgwWO>>`qQ4JoHiAp>EWkdv2*8_FrF7_!2+e*&A{ zC-fsiucG)f-Zez+`997N6rX61+gbyYV@ZZ84M~7OemIpqG($@f&-pnLp1q1x4k%DF z1M2fp3>s92AyfXUxHSIFJZqo)&P*5eti@A&8_=P4YyI+stI2h+)dGEOVpdl{mb)s0 z)Hnydj3g+p`Pkx+kw0erC_mHF5Z^{sHkCQXDz0(!i;3!@u`3;+4d!&`^SIggFLX|E z`yh@@=sZRFfXEsWq+HM5`C}TP%toA3C#N{TEX(-qGem?whCZCepD~>l#ZR3ev1w8v za?Ik`H&hC$wL*!C(_A_H$3On-+wj4|=grpPKaPI6`t@i9(|-QrAK(6|v81ss@e)!2 zJ0?aaCO8|SPs>zt0YO`*G0+QIsN9x(0Kb|Q-U=T)Z$97p$3OnjZ2d~z3?IsiqZW1M z+chWx{Qe>QWIry;w7s~eW%U@yMtxvNbFgz2;w7)9TyFBCsqx*cYGcKfaZ~@0$J^O( z7_)6O*usNAiX=$a_?Bb`vu`T&C#dlz8f#I^-c%}Mi5xz+Y$b;($%oW?rLkC@owQu# znzjn1wdsR+s{K8Pi=jz881UJtRP3+!s-nN;D%Z4C?60Ofj;FgoqMLeY**>tdTj}HO zE4il(?D-uqa<{O3hQjzjg_k5Tlur;=w@v^m%`NTO@T+Nj5hnzL5Xm=VWAcPr3bfz; zc`!P1!%GulmjQ=R)_xPkHPG=CAQUKDL7Wl@TiGI~roL1>Y_Q8*PDgX3!z-a02ZlQKLnZK1&3h!;t#^ns+AgEUUF>oU#?t2aNjcP z{g88TDst;idgLw6N7ESVeTrAKftgaaLcV)|7gst~*RH$f!a=>pu;Nypu5YZXt7|-k z4!nl3?7o^$H4Tcam-G#?bp5=-+QqoLx`ai5_d9=S5=cs4nWU&Zm*3`N&D zj|3-52f;3sJ^@n}4R{3|Bss-RTB>`t;BAn$L(xfq^->Zc5$HWGj_hs_*Yd*v>5(F105yDa#DDqb1-J`iV;`Qs&cbuuyd z?W0Y5#2bgE(7CEuL=MKbXrs6z%6LgrUZM}_9=gm|tPoAfoRbZ_3A-BJdEz=bZgN=f zyBz4l+c(-3bE~^vd5x?uUfCrzO6bz2-K=VC#cj>(>I$=BZ&TWovK>=N-fPv4`K4b_ zYUCIx`bL>y2h*C-iKLS)C~gcPCio(eWU2v)j7%BzM#wr0)D-FS3#Ir3U;6 z?3jL7{$$rJhcv(>cQMTUY)pX*_)Jc`Zh_rHw-2W1bl2kCM$W4Hz`7go6bo=SwXqG_ zlIY#dt!GnCp`2u=U$%ZLu$tu321)aX;(w5IV>b$`#jCH5sPI(I;~uMaUb&Q=MKLVc zaXPDME=B#XCM0JaK78m9ku-Hnl(>cl9ll1IWtZ%NlYo{x#+t{ldEhB4rIy4JQX0eH zrW6|&&#~yN`~*)YVQ=IFR_Lq_+ZBWIK7gYeRu0V^ zNTjfoqJVL>43h0)%5hgs;I%4{H=J4(h_2tdMIFt@v$k#4VQQ#J&sGht+`aIIi^(zd z(p_hdAUe+RV4Llba%_Hp+V=}gq}ZR0ywHl-c&B2UOb6%y-DubqTrTKYS_*aur?g9+ znjg`AHW?g+HnHJ{Tl38UZf(yDKq1RZpT`q656bU-#v-;v;&RE>rPylT*n`y6(~ z_8}U#j-uXXX`m$D`Ir~ 
ziYOsD$4|$)D3<(M&LXn?-#1#v9iYioK$DVH3p;SMcbxVRYMd= z7G2x(_oRK@`Si{?RVYobqj_9=GEq2su!j$nT&V)kTvAdbd~lV>JD?Ot7=~HUjeBXy zJ=*Iyzoi@R;!`viOya(Nmy^-<5=lt;+1o4yMvlT=|0A2+iLZN|&MDFrd@?@rvez7# z@W+K&QomC=OecV>o|PO_D}^HttAUlagsJ{!8>MxgQn9o!c2`2BKC<9xUj`(Abg=2? ziy!v{zP$qf^nkGAV;i7)&*mVp0)#y>ybg$SxG2$O6~>h#ddrWB{reV+TPr(u$D3^R zTKx-7yb6`&TuT8hI(z1b%;k=?%(Z-Cw7D2PnJ!k-h&gD^Z$uTvYKzc;$YVNR14?vU-%n=yMV!_Z#bv`rr5CCo4GZ(P^A>-F(tp>2?Pg?YG<2X2YLznTkC$h@cVb zlxE?AOe1CmU~xCXkatO?tyx9;nTf{Bhxby<e5!2J4oO7BaHq46{@3=FmWI4)M%t>!WyT+m#ueNxTIBA^wIqmF_)9 zCwV4)16`LgV^QX;XnK~1>{71G4-618QFDAlh z;~V6R)-WVjvo8vO~y@(#VK2{jfW4l{jw5Nb2-ccbeCG; z4%Agi&V_9kRz~nK9s-k~jg@5LQ|nYi3rEY|@E#am0@JG&%rkNCv*74VTw;Xb=9MRS zUjl2=5pbU?>2j14bd?z9U-$BY$dh(PQvNk$ZkBpG@W~S=fZvppU{!agajb4kC7^AL z6OI$!I@XNIF54kjB_%8+%Z4|FWb-g^Z6~}{w7%FfT8DMCF6(F=R>QipkPyWBru^ik zq>t`ma+VW8D6gmWBP%M(50^1u$lu{*(+SLe$f!Ef^p=D!_lT@ILvMM0zlzLsyKscX z)R>E1?tWYq`|5Tv1dAQf@LSz(oWCI;=9JTX(|8%4tFoL(wLz3KaXp-XEvA4Mzr!i2 zq*Rqn4g=BQu1mq_fzJ%1JZF}p9k}mE(mr}_7lrO7=%GcoSai7cR&{w?Y+cF;lZ_?P z|0_mzvry8z3RqJmJNX#{o6GhBPjnt}^0V9f=k&e<#bj1H^x|q0SLwV7=lOE{UY*`0 zm~2gQWMM3WytKtT=U!Bx{=kcm1jZa@hRQl8UiM=cyZH~R-(zTL_khwV_>SM-CiH)$6q3ush_Mr3|mm5^8f`9c(A(*u+V1 z-^IQO?jvtYbLIeZ!xcMqtSsWep9jhBif7{7_Co+6LvOZ9EBJQt1aG{8CEr`k%3W>w z<>sweNj~}Cvr@Hx=9L>*T{F`08g>`u8ZXLn5;|vLdX_D_$>Xy0SGlC2%RM`>glnIF zftWXCJxtZ~bSi3a@AW1PmpZf}yXz+4t{SekS>7Zb-!5y$jY$k-+)2b7t&+%|w2CF? 
zk(rh;m2wRiS_%Adq1-&sT*6e)ut4fbtPm{OVG19!Cp&jm?Lhpp4B$Qnfi|@1F9sXX z2ngNF!dSUz7VjA@FE`X6mMB%@a-?Y$zd3hwpWA_S{0{&3to_B2AXC~b&rj>R33Aqz z88O!}GCWpupE{;J1cCQI%kyc${u|EdO~+SaGz;fjpa@qsM#C>xZUE861CdX?EC28@ z%+c=IeAW(@*DAyJKE7R$Cs66LvDiRY*c30q;;eYy(xp8<5%LtgN%EAVo&{{30PkfeR@h$Hic+Qq%d*xp=ChhCA7K(Eh}0+ho+?OOzUIFUjq_7|#A3?CwZ+(7!@st84WZ zhh8YW^?}|fkpMLFyry$hv_nSO3WVP-7*VN%RRLFA-pVrn|&7!X8Uo$R*9Iwq$ z)lbeFt#;6eY7$B+*rzBST(!ao9ax?MPRgc|$G1D@0%`&Ead?6Uk|^S0_<;y*Xv@Ro z6J|-+AX_`Wc9c$}nV+6XN`qV?*4G6$f4s&C7z&B3@Dhf}W@R?1WeSO%q!n56ga8}= zYP7k4c8Px@>vcY#vWFvJb~nuZ$g~ zr3=ZaYUogrxch$R-HZ2o@9H{1S>$9;nj+}shrj+6XXjq?@QB0H0GDVW4HFZUm*@&h zNB+?unk0lfJaxDX5`&|Ql`J^MddCF!_3N(FoyK4OVjAXl|F-k@1N=bgiulz}FMj;6 zLodmIgRf=Qk-X_BqnHNJ&953%9?aYFFX?K-+ZS(kJhLa?nra8Hc3!{u@ZLWVjKT9}k%U%Sr|(TfTANyLZK}K?0C^_LAbDPH3iPT32R6h>a#KJi zKnv#vL(JpBi&>^7wIR%8*U*woD>6lXr=|F*3)A3Ug*bO2cq75M*#;#0N2(#xc#4$D zDeV)#a%i94Y9BP3_MyBuGBB++sNkWRA*801HzfA0AkZ1#^WdH{?&lU{$AQz3dbAT5 z`Px8@(n}NnjnIob2^~B&_#)Rd3$3+m<~&HDzV^4srFNwNxu^dS8KL@S~luO^b2j6Ve%3htB<6##bk}o>j<{ zs4pUJCLP%t>#E>3WGV=s=ont*s>Rq>#WLBS#r7;6_KnZ?^eb~wGcRZx?u^p!bX+q^#A z)MHeX2%4iN)xo1owM=SzbgOLFOE{WzVUEL?p^8f@ZgwB3B(t<44U+fu*0L$vCCyXrG_ACYUjSdHG>9dTOp zes9scUY(6tF?;=jn`Uzt`Nk8oFmIlLeeb#~Ty;sJ3)-u9rn%>?Ja6GSYe9Qrm&Be; z#%v76Y!uupoF)`PtnRt4EMD1SU_JMnFa8@v#s|iTf9~N)?-!KG=gIAkFBc_;#h5-> z#K?uQ*1GWhvld5W_(BSrr;=-yEffTuK(jtO&NPdeO?$tp4<-Zmq0yya6>sV}xd;wG zYe*>gbZ^TNRUlMGaXo=V7#QE__#~kdQDV$6bPgH`(dH5SN%#YaL`&v^4GlrBYyS`= zEtihj#g&R2CJ!H4aq<6kC&1(Zrw+ktsp^m;glJ0j3L3jzVxz_U+t$lA+b4IMbL4h+ z6Pmr&?`cXFx}reW!B8AAJmX88$A#$+p!5HVn-08IwdS)CJ)0|jER;-)I2%x*B)67zM+s=$6PND6-`~94 z`_X#y)TNr!q;hJ1QmN(jix2-#w+ziYH=&#%9Dh*2W1jbrVyq+K2S6Q~+!QC=IIc+& zUL$vD%tlH+VoK8aC-ZQE#QfEu$J@fW8v;@yh9?=9PNRc(}qKTPLvs zkuoBE{qkIqzz~omTy4`GnG2;5RKQ}xD@De~B0a!6uh3zk039~?C93F3?PHGe#kPLj8v>rG1fId4<0_1_0(p%~@rmFZuMZ9rzOcu) zoNR2#FxZhCG&8(_;}absnOvF4TuPjw+3-0|!_o$peslR^GEI6z6E#`$D$G>O?$QtMX5t^`z?6o7tkUmeEL6E1YCIv+aR?U1Xt3oD6R(Gm4oIdPlNe91n1 
zeXGC$fuMC9Hr~b;d86IfKLgD*ZydG4Bp0{Geof2XaFS@4o!;juv)7|i8}|%m^%WKJ zM@z4?FIpHn#AV%HIIFd7e}hca$9wUHp`&FD*0wC^z+A_Z{=W!D83%M7Ma(;sLDDmX zcgA-vVFy>5#iWpUt<9$?Iw|UGvF`jUrj`B`d+;msj9)oM$*(+dzY2<>S4XX1oiW0T zmIB1}fB=2E41d9fu9^-STF;%1V7Po*J{VN)(pyw4n#b;y8_TFk3_xe|2-b5PlAOa3 zX?4mqTnePYci;FI;vSl)vE{^`|xhpqrNkc zjy)>J7YFjG(wwa{DmU4X>cvT*j~xdewc4AV21mg*AtR!e}$dXA4tx{W(4oM>rD-NueaW=dhV5kBRx+03J%bPpLI7oAJPsHHj$yfq* zXBR2M(r^|MO(C9yqwHS*DGIa6=}Md~e>hm_XT5yoV|=pmpV4`AKw{6zJ2iBy{FT(L zE_Wth;eS_TqgtVUmy?6$@o=!r*#=`@98dD*=XPMSH<2UTC}w^e4Nvj<@!818#Ggq9 zzOh+JEK+zy^DudNn8JPA*hpM-$!hpV^|ENcPb)t_kxSVi%CEm#S-0sM98 zom3av1E^6acpJUN(+v2h3g&0rZ!YQQr7Ms-7_!+=3kY(j|09R}fIWit6n2u8)Z@5g zpdT$L^c>V3q=1uTm`pEG*@LYPg>MW;j7k@SvwV>a4#P_lRbV~g#-#Kzn~rDGF!+z` z0%e#s#vbXDu^qdD@j*n=!5*PRd5Q8{h<)W^J~$3O1f?z_3Er`dU?Sq37|G)~)~gP^>H*-4_Qka;x5x*YXi)?||%8lV>} zCrZ8ml&PQ$%g7@<2qr^QQEgPT`2iBlUF#$dZTt%PwQ0X;7`8^kaWi>?sVt8jp@Lu^ zOL*anZ1PDmY9djgDnZD`zImN-oTRpb!W&{F*0{i8-dU6o-ML&05^XRBR!F(CA9Sk$pBDlzFkOKLA zPR1-Z(uY$G`rtnUb1?Fa4#R~f;;4CKcf&Df0Fv@_*OyIbh{Gz|ntL^q@7vT^z4Q-= z2oyx1Acjsl`2lD5?22Lg}Sj#_z zW{?3GR#FTqgh{k;g;n(oJg08MaVBD6R(dbBeS+g`ATHgC?FBD>X}W!6q<26 zRmsAND~b~!`c$fk&MfIU6Agp$P<5|nsU9l$BhiN3ZoeQ|BOgbjk%m^%5ue08%`DD* zns_r2LquqzosLtLt8ZNp%|M%<T%37NN2*Utau9J;BZXFsAuRV|m=I9Y=J zT`d)VT{(QcV2h|q3?#w%)m-G!ZZ(K94E%FU48BOxcn{QqxYjL?tOv)+5X{~LZ;vT# zWJFQ#qTJdlDFI^x^=$lAVlKy;03nytPk?faeLM~utyVR*zHWy#X=LLD9X4FK&}-Z0 zQbgjC7GTP#*mR98t4(ku{*bqn8_ST&AE_-&9?7TOju2|C5zySEhR$=dd8jC@&(@AW1^ z)sXNHbC{Uw$G~ejGncWeLw8Zx!2tu4JOwbArYUD5u9|3EXf#1gu@^BN9cKejx`9uV zK!aOHl4~XrRW>NU%p?Msxcfw2Vz36DUE?s0xwYd)Xg)Je2%?J?^TNYSX=RkxGUoe% z*vo6AxJOI13tXos;6$+u%AGSR=Pq?!=<{RPFQO~eEsdfD%=coP1KB(5 z4^bnPL*uu&6Vx_WvZc}4q5QtL2g3#9ylj^l5^-kyH<*kyccFWUwv&!KfYjLMJXe#Y7N!JA)~=n z$0Cyu=fuUfT*9$fX$Q%I#E5-8q(t%f>!{JL)~hX+DH}7jU9`LSf|h1Lz%OMP;p?-I zD#Q0zTgW$|eBU$Ye>>?1sE>o*N>rt}iGI_)q~cV=NLh4X`*r#Iy^><$tQ)@{r6ulu z6)Tzd&ZGe^+14aXaBm8_nOhyR4(VgDW4{$W6?^sNqKfN;;fXCi@ZXl+3g=pe)@aRo z5fBTTV4Yst9*T)prsi7@8z~tTdR-b 
zy8V?+4eoa8*E>v(T9vzm#ZtYERe`wi<>#BeimV1np5?CVQx)79tsm4`TsNn3Zf)3_^o?s3Uika#?QyRWOM zCOI}%QVYM%?!7QhJ!=9H3HI{0)#-{ZhRs}Gd`0WQ{~{5sLOA;pT|ZLWHLHeCn>< z@m`95wg1xk2X%_X>pCiMVRbbx6}@MA;#ZKB(eW_7$hn1qY?T4J$I@iXR34f8_bt?S z0d0E=*^4X3?d*luUQn#EDpy`ltN>SWJFyC!=F^SGMNP!%7xE+rZ5&>!)m7Hdj)7Xv58M6GQFcHqTv5hKA;RS0o$M}-d8iZ-xCa970MTx?o0 zF5|Vjk*m^4ySpAVV?nWu1H|0;bhsJcM>gALyUe-Jif&L;E8g(SFVvIQ#Zc4em`)qnkZ#H| zEOZ`E(2E>BE#!8E_<_6nV(r~=%lsUR1ed!?uT5drt%m_0=B zk`&#OjD`WMw>~*9l7BlXmgr2#UMHbT4%On7bM4s?#flr2)_m+x!Ewl0W+_D^qQ!GBBK}QbRY3@@cl$an z(^!=wkz?8_(iX2MD-fv%y)RFcvErQqxVX82T2W?nC8eU4XkR&3Cft6ZY}qg}zUNPV zJ>^?4yn-#D#U@_^O_&>|M7(rujLbu;MW=!;Tcapj^IMuj|CP)~ z1?Upwy4~8AxvH2l&d5mT;u&X5);MfHJyCjDTxkAjcxjng-N1{$CsA_CU+6@>w8{#{ z^CkML24g6)aCg2GAjW^OS+3z0_CIYpNy&;|IA_qEhpw8N`r>w>t8Nxf=?fxVon=Wr z?Sc%{C9nN1=M$V0_-Y-47T#@BT_>|%xydkXei76!>*iY5+<%&bgJ*XD{1&73;CxwE z)&*U4V|?`FmT+$|JOks)y0Fph&OjW@y9cuq6QQMkG9P{aWz9U&W(z|{1(C4sPP;45feJ6seq}Qy@MtP%LlymY>)~BHS;^dQa zl1^gcAsfk6vP>3o|IrRfw|j8gs7;r=c(UX#Pn3+Eb-M=QRi3~EI}Px@-o)G8Z31Sx zhOhkA%@up;0_z`9j@PMA;uys<5WGP^Ls5MhPax<_#Vvs?fN(}#QzK}^flthdCq7g5 zJ(eu)BL$Vd*--}1`E@729&h?1PEyd1Ks#&Zvl9|U+T1O5Omt;^`+=-ve#XJVM-ld` zr{)05;r9a>i)7|wr^p#OCuq+W{VW&n@I*^uYkrNs~nTXzh8;LX|eT|CSp%&qv7M4h4Q z=#&p{HX2hnrXS)hPNDAH+k7=SX!1Gi8MO8q+z0VlH6}SAY=Z0yUx-<|BDNaLewlrC}Z;Q78aEWUlDXkjCgEwy~j)Tm%94K$t zH{Jg*?(>8cZ*~^IZ`tlwmeO;tnlpKmAfVW@8}_Tzo_D_GBwKbB8?8j4dfh;gu9VOO zkz>g=@S39}q0-N6(AWTgZlH^_PdU*g^ftGl9McXNhNjsP-^884ppYFiRtJ-8L`lhG z6Nmxrj{PJbBfiQZ$O4syR9-BOqN#P;5?~BSa_a|9lCF*L8$+mP6x@gkg!I6N%sD^o z_K(!HS2j1QW%&&~%Q6_`JktgX1#niZ+VFUmAe*u8HZ=ens2~GZe76Db*HC1! 
zV*T0?F<@iM1DV15h^bI@%^=Wkfw8}GCVh+EhW=ZvP+7JFq0qclU|jo?+SaxUJOV9k zskj-mIGzqBj4rq14T1wlx&yFpv|y=>SF6zJMuD^%+KDn;ddQ#Q$mw7fBhVdTpivLI_4#@>ab(jkVjs zVZ*0qOtxc*3!e(Ji?8Y?hrY?M@wfD?anv##BzH_Gj8ub9SZc0;#pH85H&Swi?2HjK zq@2sepm4KWcmW*8XLX0#^wHc%`c-xrji^r$$n_0_>X+dAgZ+|bPPiEdP+e;#LrRH# zGcqdPGDeCYeuHion{2tVsEt_*yFSvbFz04b3Q#6~L;Gm6rRIVnKtF9zTY}u6=1)Nt zB4_(m`O4@!8bIyjQvCL0C58dsuEda0#=#oD7cMoGhSt)M2oy@c@Ktt?Vx`a(n?Ttq zeb>8?4fCquw9@zXRG%$0tGeKJu^?PH{6~z6O(Zs-JGG6H<2Tj`s`psSmpVat11#{V zPVvQcJ4>hbOK~UY1V!F1jII+DxN+YdRKGl;Dy()Y-)fEGoOViYL}zg3MukT=^-{OA ziUP;fCN*HG+F~X|hXJDNQq(eGMy~2ww4`!YU#aH?{UG6T$|;`b*f_bq?UYUPX_Q0l zg$tb^hjt&4Uz>#jxnp?ozc{31V+Xnw(fdK&kL|7kOHI0CUl!|9oi`aJEPu&o#LWVn zR;j&sDzmOPw-IeMpEwjxmvf;yIJq&?bjI3776NO9RGkoS?Pxm)>tG%bn2RARUx zBCKX2HzJ!2s_HTytQ`hBT-TVaC})agz-YJ%{=Con?&OEIUU(*Jz<8MS64bzmZ&&n< z%qBO_ovBG5pJUV(%_Mp56>jcOq z7x<|0QUeb?fYh4M!zQ$+PEKS1Z~=R@taTxQO?6MzW9%?`srNN@J@?P6+g8Urig53? zpsbN?7SlYBNR-zEP+pH%%Ln5z|{u|+Ujy zOat&^G8}Sq14S$OPAqpcBJ)f&><{UP%Jj)<69z9OlB8r_ z!XRarvUGvYHL|btA`C&Y;zmNZ1&0P_sW%}P%pB1o6HGAzFR&J1(gN?`;2j>+aU9Nw ztV5GabUggzfq$QgcOCOjbPl>8SHXxyd}ldhAjY%k99L*^dKyn8^gnKAYybkW)CH|3 zL+U^(_CI8)xcJMHLZO)%envK@S?ZYZ$UsJEXWS-?Y08eNMEaqNb(dSpRwjmQXs&6O zQ^Dfz7p*jrw;0|aoAhFSW7}G2Eg&M^F~`)?iSn8#7P4m%?jk9V%obauPjzPirazSh zniYsNYZ6ihadS~VjCsOXjK!-2VG`WT#zgRs;&Xgs-tQ@o0tIOhdh}9hHCx~4z3KZ%6sF^OoUihP4XvxgQWch0o z_R7+dy(mF|Qj4_iii~c%z9?Cc;rz4DV5_KJH!Y=x_B5+n2d`tR7(;%M6J*=eO&RpH zXz^F9j~Y*ozMYi85+Oirm=ud5(^4pY*;RSn_mB`#K<$W{a-N4OtA@b~%g%gJgNA31 z-5Az+5(yA)PE#Ay3d3#Gj?NS4A8M#kA(Ez!0?m0P>C(PX8B-PEk~DY?-;6JA?h03t zVALVew(Yx+Q%JbBzY(0S7GB} zFB@O#Nx39$go(_!@ne#;usL7ULh+J9!^)+d2-AIVbZ{BB1PI6WfJ#oHli{V?Fsv%i z_U*7Z}VM1w01Ron#>j$lpW5gZO=OIn6jHi zaJl;8Sg)mud%N>cyGXMx#uq8?O$73g@~DZ6px=Nb*Bz3NE?*u73cbM9eC*BcF~*gT zC!-{d(KE`0kD^d6>i|WelzK-z9E!j@oh+a6Aroa0x0O&02|7uh4oxXl;*Va8VP3mHGj+HR28w@Egh3hN((y$(npyIxzD#)=S(N=AE}(A6Iwp2FP(gDdKLWRZ0E@@R^=B_ z;Q;4VDU_})s269;rIwUB#y87{weq)}oP$@F9i0YC1~+rNhg%tGi`r_m^ad6|i}Qkst<}Qk06{J?@*Z 
zBM5GC4su#GRO%Wh@H>h9HduSYn6m@V{-1ARerry%DA^Z1wZ@T&jPgi(eUkePBypZpKLX)TsGSks^r?GK|Q9zwV`SpDJgJfBDr_zY`cj6gbS1 z=QLREV-vk5{T zNl(7St|4T|G(apCSVNL?l`z;PGc9yKGaWz$bbmJC<5Y7P;&uR=&^|bA7@G|3`sdjv zwd!#H$TBGw%n(7~|6c@hmjQN84A_nrpQB-}DSHiU+O^T<$+$xkt8-DwaX>MWS6Br9 zuXvKtU~^2OO-57_1DsaiE6^0_mWWC^N-!5F@RT47L;8SD4{XJraH`>yBp_TfY$(dL z;Rp>#B&&qsl6#O?TA@wb5pS*7*A=vFi#L?l?Ky_uDqslY@tW5C>8sU0HH2M3bolnFy~UfsF@D*3QMJ%yeArq9J2{ zD%iScDd8=t`F*2ZFo_u)e_(qgOSEI$9&MbPl)bzVKNz4$AY)aNv)%~h$)s|EBnh#- z+8A+ITWQwxwq|2OVsl8o?VN+x?@z>mD>kRDb7EcghRrd?#{m z^2+XMWUZJY~UB|5zf<`9G*(rJMM0&}?otSBPAp)VRc~lIe zU_ZX$7>bt7#XGNRiZh4>Z7Mf4qRo@ID-OV0>KE_&{kzM&XfSgysMPJQt=~!|diwQL zqMPeDh?DMgZY-QfYhM~a$_0L_kWnt6Y84C5pJ9_I=r$I9zk1F(Z=c8y=yAzd5G0S1rkMgrFj3UVw zu?Ia`{nDs&T?y-14|?O40m@2yEOHtvOqV`$G|&3YmV_5NHoy6{UyIkuUEhzGe$&=m z+*X1w`O8Xv`ojGszP2hn+t+_;Zls=Tk~>L-c14P{NA2IwqxN}GF7LuZ`d|sPzU^#$ zIY~~>rosB^+Qu@5=b|G^kj3MZEc?`kvGqbM`2xdZU`dMkf%ix-(l!AhPdIS|?cp&s zJPwmyoT3&zqtjwk1z#S#TF$4J!&pCpT8i;N)PwCo@Z;|G&f9|>&^L$Xc!{p5aRd<5 zz~zF8HV5i(j2?pMIU(Lh!RaJI_k{;o7MLGh&*F}Ycc9yBu>a!y&fE6~UYj{ix@l8} zxuOxLkiGSH)$6-9YN14%=}OGM>SF8bx`{}dnQY95ivM>O&*GX&PfaBl&?_?h>`x@Y zUrjTQS{x9I(nRrct21h^OVQ-I5lT5w7WXGfHc6(J5-_-qhdma4k8|@9eM>n+-&s5y z7EN&4l4wgf(-_QMftVW>9*_Xqx;Ymb%97tr;w zxPv$R#3#8L+wWhom^zrPNTYPCiYPb+qC^uYY@TT8`k7B4PcFq3hCEJjmGIpjYF1%v z8EC)$T1g^guJf_rd20EeK!U1+DV#|Hx8YY8yKeCAqrU;iWEf^+C>tDGKuSeKTQ&j@3z1x`-E7^2@Y$Hp$X#mJjg; z`vbAUT~dqW(=;QeEb>hPVT?@Fr(~}T145#>5mSy12?vF&3}N*&6#_ds30~sQac^dv zE7@5a9j7rY6?c&AbUkjF;9&BEcxt)!N30on2Dq^k?MmUa*$JAP75gcMf+8oxETNmj z2#%vS0CwY`iH1B6D=j*YC_o|4YF_YUSQXl`ZsyiN@5K_cchb=b8}=f}TcYf}pA5*| zjP?LIMF9GE0N?nMBF2VtO>~#;(1?5{?&UOG$0u|cK%`&gs-$jcNIdaHMR+x3FoXg` z&t*I%X9FY_+#mF=Lz3(K{ACOzI0}AYA>&u=?-ibhAlum|##wL&-!|;O*O)N&b{hyoKC_@5=Z>GOqCcADMOB<(cr-C-EE zI&_UScVa~v2NSAyg69G-DU3lsGO8Sm*mHt<5S-)F7g(B&XO9c1Kiw?{M)ptKSPrzi z5)Hz^2Z^(kf6+sKHJoiJBXG2egAgMg+l}cpUM;1eF*LbtA$v`hGBfp#A`?29_>^WB z?9fQ81Vkc`)2ElhJlXl`!2?);4<7KD)2qEVd<~!P?CRp9KsCS^E+URi!`ziz#IZ5F 
zvxiMFT;hfOIL1?s2z_vMFZI|NUY4TS!92=(-vfDzyQf6Cx&Kj+uMOvrqo(6ZMs&=V zxs_#eS%h(1;kd@PU#dLYi#+JsB`c|+34d!8PS~pxbnQ=B+)a8c$MI9l+Y7m7BRn8c_G2C4pxLbaZAAL^#plwn_emZ;y3?!-jciP;O)CAi2xuC{N(}qYD)v zD7c1EI;{`z=z~K+!c68QVUzydsJuyyo)zq8?#2lKMkygj`R-RNMy|t#`;6Ut7jpuU&(EpO{e3$v$Apud}(%qE~YC3QLtCYn`bLjla#`?yS z7W8FF9K;xdI7>=sFl?)3+2!GcZ-Vv*nVV3gq$JvtCyCSC_dYl47N=_fYCES_Kt4{S z;->FXS5^M#jTo9rOSm=>uLmg^eUh|5ye>}tFU7>`48chVRX2zKAA9e*)<%*givH&*(sb2G zDhqM3T`ohrJe0v*OqGqtP}S3$W{U(RkY%A%DxzgK>%O>VI~eJRijVaaS$g!jd1tT(nv;JveCm#K6-B~2I4S?FnQRrN zOWsTH)1x5bewi1OzV0?{Emb$bYY3b~IM(z=Uc#W~kqzEL(JDq5T04sf_P&SnG%@P2>(BtuWv zKC3XdDxH1&yL5JnbT$zQEhf^FteBPt8>i${hNpp@btq*T1@m%}pMlP*xr8nNBb!~a zP0np1|EILU)?f^-4)|q4HbU^0W5Mq3_RGWW?$5ig%}Tn5e|fu`*gTE3sVV8h5m29v z<4QrZfmA~$qlFl9%OI|`kEZNt?8;PU5bE8YuK#OkB7Q&XGc*wlG<}?ZrS#Yo4-mw7 zAlfB!U`Q}Zq1O{WG7`uhc@7jONOtW~oO*^j06mm(Q|86t{|DeoNfk(zh1UStb2I;) z-CtSu#g3UwOXI6d;j|1hgfEZeM~55KH=OYV@>r5o5%}xL5Vf5-Q!q4;>}*Hd*ej}}wn_~y=r6dfT|i^+brdp&b1CRa_TBd+XJhoy_!$Z|l|m9=%jHwi=M zsuZBn)Q61(gQGH8h?iahf1D>LIuiX!r=clqm7>(A_`vD`Z~2~KHT*^LrBG&xnA2L7 zdY0Uk56*Hs_}b}q8MAoRG{kKBuV}k1TMrNl+pf~5j+AuxN;X&jXXl%vzy9^FtH-N` zF>AD3_js#8>uZ$;tHNDdApKowu<20qe| z*i1zwMeUVg*KW+l8kB+wM#MNX6_<=x()6OpKptc@JzwsYLc_Vp-r1g zn%MI~3_a`a8X7oK#&pI0yEsR@|4#VLqi~jR44e&eYxQMQ{Sk)3R$m6z1sKIcb~KfV zm*%hUC4~I!gK4eRCqWwLC1-TPGlk+kPS#DY+6DNV!y{K-8VJ4zK|Q^MqYN$L!4qd2LlyDyv7vm> z?Xz#_xZVv#^^pgdvb!+G?Xd=jCZ8;uIWObmjPFD+`vlJfKCBar|BCUqP~TXp+`;(f zgLGKr=;b8{%?l)1S?&u@`^buJNeHYT!dnX=OH9MUc-BC9n3;N7qi=5Y*%HdND3ajWe_nYhDU zvGkkMa9l#te)x%jG(qMfR~cF^_$m$EEn9Xc}RsY0Uzc}s*3iG zQ5l_bh0!`FD*w9ty2RA=A~Qcf?ObIfC12q|SMUJ`Uf~KZIXnV}<3{xzN``BXW?R85 z6j%v6&~aDkU~C@$kkV^u)v0@Vb_b-YfW4!LPjXr~W_xb82KA22{qzfx2e%1oXb*sZ zqp)-SYgIbT&(t6WIP_8u_7U8D!trBf>+q?QwzF~k8$-I?8sf+ha0fU|Jcc}b+T(&k zM+nZY!Sc|>oG2bcup69LHla}*yEE~E6MLYv`f*GJEw=I1&P)6+_H2dnUb5XaO129X zf;_v*0f<)q9BV;lO64J=pGGqrvf}(_6PvZPQY7}QVJ&aj&K1WJJ`}%Z)_~qJQ5>cS z8igY89MbuaW7u(=6@a3vvR%SIRTy~#Bh$pGC|WevY^%q{Qq?V)^X>qWSxR+lnS^oW 
z>~(Onb^R);(<>(~4CZg30NMJ&pe+_nJy)|NQLvhtWxd9k3RQf42_l>3gC-QK4mtx& zDA4Nk42tp9VzV|8dg}P5&(n^Ns~$Z=?i}e~>SQTQ1}_p~6iaEzfP~$^1$xL{rRXUr z@*;}NgP18NzhBdvH7O%aj70%63ClQ|cq`*rbZogK`+!o$U&|GDNZI0{kf82^sAHaw zPLznkZymBm!B=6AP!sEavOtp&tJ0&|CT+Vs*4e@P49Ua>s86(ehcvB5iBSk(VnEWY zy`AE!cvs?J`XJUi6+Uhc;iSNe)(Gd4m&B>CaM3rT;9SqNk4Qnd16X#zi`_l8_Vol9 z^^%jG8s$XP_MN~s3a>^e2XwMlQb>70z_c!$RAZu6WnxxiL_FBUhzoN*uCV3KYR!GK zn`3e2pECqJiH6 zFooDgjIM_Ku}(~YY?xh@kB;>s9#nK5m9M<~3Z`apLc?J<={!j77lWHv0`h>KbdJp- z;BCR~S`rxnTCN2M9A@zl2yd_zW2hLS;-VOn(GQBOSrjIuy^U#sGJZI&%V=gJ6V3;n zWo@e9zd2R}NsnipNHX;O#TZMrZhRH)yBfY4E>Eh@5Jg26AT94mbcZ>un6y6-$>*>l zVI9UrtHUWWRWj?;pErYS1vRrCPRh$S82D?@OGiCs;Bf^_&J3^T>-%Fb#Aa!nXVDFw zW0X`8HMg?DSDe$9V3L$31_A{Hc{<7a+vNI15T9_hcU3J(CnqZ~UP&9#2m1zUG z#T@s|?-^(lLl9qicJvdnnB$K2clI}v9f%_93`ohs zf@IJMq-ml6cf~bqS!pRHKM}Jhgm~`DtE@PL$Oc5IP+g#7R>;oA9nj2&LF+??|HIh*BFf0dg2WzSGNk6$VR}aF- zLhBSXxGHiA!9%(`&p!f90$&vH*cum9tLdoIvU?MG_B=LogV@Y4-E+WeCK0CCC40_~ z>7Lac|2jI^kAR>~$!&YdQZO8XBcxy|1Pkze5)Dona@USm(56tQ7l1-rfgC`hpBMfK=rlauu?_%)K4XjKy_#f% z|Gh7_LEqw(#eABrwp4B49d>RP?t(hh2D8Sfa;HP6C(H-g^={2OqI#`;Mi#vLvu?hd zRYzT;+nT%YYPUQ^tt)p1?NzIx*scB|mUA2bTE<6Dem6dv#|?V6VTp>>W$J=WZF4v| zHLI4;>EvjUjr@oLcf#E^aHO?*^@q?Vv0DoXF#>D#&EzFW+XvPm6^TL0go7H135BqC zR@a1c@pMdB7Y5-8OY|aQw?or;E5UerDzQ$q3l~thlf3O`XbNuC>UiwL7W`|Kda%>b zunvAJ@EK02ss66Yw%jMOy@Bl4PKpzgo$wWt$-~Uj(-I6T#^5J|)3uIX33}r=i;5br zcr-_cej#!lS{1kmfTlaJ>u?ELk$Lfw7cqc4)ZP4kCYC(izHn;b$AK69XXv^G8dNg-J-?eNYsDp5XD3BbVflu=%g-t>HdrHffgkD!Iq zF1b1Z%QD@km|VKZ<1MrPHEiudeW4PMZ;}9VF=(>)j3K{IC?k_32lKmI#OXBgwOkC) zz~iePte$&m0L#zfpqMP`qG*Brx!e?!dpj!HDc#>$F?l{m z*|)Ox?Tfe;`iJZt8Rwy0+KlK72I;BYv9TJg_qH6CxuZ3x{rpT38PeIt3FDW%`&;-7UIW(8iT22q&nupHQ!YV`-jbHAT?PGX+*asIC2==KHJ zyGLKxG3xiQRr5JUeUn`Di&HZadmTYB(Aay2yYIFS_uq97{_^^V{h#OdPU?u61Gx~q z+WzIgy4AiK|Nie%udxLJzaL2W#GL~j(oy~_6w?Cpe~Cc`KLtA6KafnM47N1^<#0SU zUic5reVoYZ~cge~4R%{+{nJ{NhAQWlN ze?L1PlkqwhL9R&*j%2SCC}<$w!WCqk9QHF6dkF}`v3vg!(LgQ-4E1zQjum68EVH5y zS+fgWqi*v<2Ha?wsMF+_n`{Qzwtk+Lb+kiN-@BU;Jx@00W9Mf1{@v|8-0<|hv; 
zC1IEq4|n?++^$eG+-Ar;Psn@`s4dhqWk=ye?Lfb*u`t%t5oYIwd^H z&9!#2vW_^TGU;GkXxtfP5C`iW8~~LPigyPDHI-$a+pi*WZ{uuP{%=hQv=YE2T?$al zlm9z|+?M#_cZ*XX!vO8ojeRJN^dB!$m2<&TsT`I^CXxd z8yE~n`3DqJJEgp%g#lx}Gq>Smhr@26S@&c>Vk&B4h;tAm=>WLem%{~!+dHp*K#82Y z9kGr$!iVZ;r{G*5Hf=1eMz6qXH1mOTICO$z1wNJ+@4eYO+yrtIZvV$s__LuyIlV;v zCos4O5Q^%}{#pej=e4{vk?#y;VyPi2?K(xQHbHg5dC)~+9Zd-{b=3a5>M7`$hN#;0#ft9=9 z^v#$l0#kP=*GJ>;2>Sv;W6q?NNuU{3HkWuWH> zX&BN}JW`_jhG`0q5^QWr7Sq{>9HVAJI|_Z%1MvrB z)GbSN9CMK^94xVV0Q~`_ zTEQrN5Y)l`fdi;JGHn~MUoIx?OV4EnaA>9sTxrOF1IKLgW@A!qb-3GhagM`-bVeK2 zeb^?f(aj1Bygn>I zhM6N`_cBCU%nz}YSoerTkz3pey^4$)BhbnRCAfo0;A>FQ#`L09Vhi8o3Mks{#UrOC zm~ za~Vv{CDVq{D5D{(nCUU64z2Q)BOVw=!O(B#d?u!+tnxEPWSFlzHK_| zxu>_w<&pAJ98Q0-s3Y$MLI)C^K|CsVo9JBYp2_k9Jr`{{>|7Vsqq}(^( z0JOf5OUsCnxLz3`xGx&(pJtRkEe~ySG716>?$c<{OD-M6^iXuQ(#vTi>_2S5xIigj=3aCtBT>U3W5e)@#TqoK6kPX~E zy8#HmDw~G|LJV#!2vMdiFVF>oJ z596ZfmnWpr=Bye(|G@hy^E&}Fed2s@$qWRDG6s`M zJH-Og(!n_SE#Q8IZfygzl{rZPtfV6qeG+Z}0LqcRy3aQSd&9Q@4~rbe`hqF(g9NF; zBkogPg}nBZ2hzJF&tiLtJI~n9NBMCxDyN~fRA~)sX{p62jwOveeU9KP}OJ=+FN>D9FNU8RK1B){4 zlVBu?WVrZyHZaEp$e`7%TyagS8|}VX{ze4TTMmU369XH|zd$fj`R)0(K`WbP#DX@l zbZ`U~p4>rEYe|$?qlUm14{n4~;3RQqriaKP<-AN(wc}60_Q0+Ez-T9MmQEt`u$!yr zdkZv|*4mh1PZY-}9oZ?m4rQ)zJC(dC*l4H3BDRP;t|^1c`D6q#AJ=6DPatmTxg=$? 
z?OmU4QLFq*Tpr?WUUznuAg+flX2cJXQpL;`ZQ{o zWiCINW~t*sr@p?Xzw=zXQth*wncexQpiOd}MVg&wu=}$YkDfnd!A4eWOU`9DO?r_1 z$WtJY&oc+Hu^%u%AGj@|QYY=R66FS|zH_TqwWeyep>0DIIqX@jdw827tw9$cGc!FZ zJB+-1IhvH_y`j`l^JmRdb``dd4#gbQ(cnz2;4^k)%b51LtFWhf=%X^82C$eISzNKl zB#ULpAvGh?B|# zn*DxrX#rl~$OaEn(Kk>)0swQ$@f;v;2yu_0EJrW7W(P36 zAgvdI2!I&1nIVdfL&(;zkg(tVD;e;grl(oIJA$wyS6P=6LYlE5H&Z=n_xRajq|war z?qO{|v+qUeY804tQCn3{hd%ypQIdLk$4?g|)eQ}8P(S#0b`84;4@>vAa>_8Uq4ZCs zwjUx7cvxVJ+a}wX7L=;g+QqE`BBl7+BD7&g09M0LOid6%npupX9$FOvS_N!LpV4rg zB|0;S%ejwHv(@}`tETKHW-<#D?SrgQIOWLVL?KEM4bD+IZs-+%&Q3zZ)i>`@AeFYB)^qi5~Ox7vT@gDeeD+KwIKWM(LZVYdpU;PajZq(miSI# zP)CShgt4#c58r}rbn^~-4*qyy#}^BxeJtN%qsY4NUyMTt3+ zUb?Vv6rrL1?k)(t1!iy$P%bdqqJqTuK>5JXAo;ld5PrAd&7D2qJ-{}*^Tj6pEt4U& zH*!G=+V*>oHQPQGo5E9g28LZUN~>Sr`se8U9*gulT5K}kb+6L#I2%E12_`5>oMWJBv?B&$B=fk`DYyERQ1us{Hv#7Kc|4Ly>iva*tm5Pj2# zRWOD^je7of5;t&6N+o4rhB>fd=ql5JB@qE^_j(lYOa+4N=jYkk%~@Z^ooVXmk{EcE ze1uz+$G}hZP}-q!RuB>sB?=zkIK1fy#FfsrdR4 zNd9a-v>`ci^y^_UOm171S>INdWnp||$q*G-4X=e~;e`Kt9QY#nM`Ua1b{>jJ zZBXUqF;|ilYRSuk0|JqgATBKhIN4>F12n;Rn&$jX!nuHcdH1thZGHtB9ZVU5@vB8I z#3CqeN?4>EolV>BIFV)6MCw>fVn(T&+u_W9DLI574+|SYUYo@*1AB5xMeC*ly5xoC z!nSHt=5l~Zp{-pmjMSFYq>bYI95gG9cG5_%)0+l}dK<+6%Y0a2FZTdfbzT_-(&kv% zGB-)Qo%}gFeRud0ov^%_Yt91( zIW(Yylk+0y8)KKPKu{Cu0F>>g2H(O1#`(Cxe;RCe)6v-_M`tVfH48xb?#F}A2g#p{ ziwnvw3LDu#le(+S8xc#s&_M?^F5r~()KDlBLooodO$3E{)6iIVq1g;3SEmL&-DNfg zw@;tUAt3+HIu|5kslbwOQXqXo&Fm4@W*_yB^+mo+;m6?Pb>16a)_glHMm<<~V}1A$*qywWoc7HWgKB@@16StF zjE~o`c;mQmiUy@|)pV#Rtz|uNaADkJwIk~m* zGtbGLn?X(60f%C!8E8GIH*El_VH=P=-9}B%Pu7BpkEZm@U3p2aYU-MYQQ2FYzreK5 z{6|Cl1(usPR!E9XEV$hQ5ti(b6Jop9yPTY&K&K619LysLzg;FRHXwAL(8_RGnlHml zf{!|usi1kJ)@=WMF&*UWPkRWh!FH&f+Z zj#b@gbvWTEq)5GhECK%Xp52dFk53QO+;En^qOoVqEwYv%7HIQrJK3RA#k<$6mxC@ZH@H;Dv|NtJAz6w-KM_ zv%62ToH5P&F(1)sZU8Eee_5;j@~HO9<2x*xlGjvMc|TSrKCkuK8PAQ*9zAPy&YKOJ zho@DY`8>={oUd^YTRyIkp842Nm7c!}J#LPmG&LS34Vym9cK4v6^Eu?>=pCJh6`D0` zc<90XgS|bH$T0V#c_58EX8v6Ho-%p-fkWlN7v%Bum$UxF;3UM8<6>_rrhLZubX@$* 
zi8kWKHS3RT#ch_6?Qsadc>Rso!EzAWJLvAcdHM4%JG%!Su5~rOB4Y*!w+%WA98HJz z*as?4R!6m235mgiWHqv|<~|4fQI#YJVX&sA9j~2K+927!B_2mU#6dDyCI{(N_AAu- zABHMS3`kN8!;uUL&hFRwR5q^01@K}_!cbx^zgZeXL+m)L0M-4+UiJ+$cEX*<`m0sTc6EdBD2094Q~$h)6Pa`BiSn{|CD4l zYAsRH^*UMLll9o4!#+AW^##{e`ODk>z30S?KxN?Zid7o%-MJs79*87xZPO%>kJk9w z0qX!!m@tuF^!CXEZHVK;g8YpS(~G)(H@LPWr+cfUSf7wVUQIThbk@zLtasLvuPgPw zK61GhkDIL;skii8?r&ujO~DC(t&y?ip>;6qbxDo$(!xhyY0jUL{c9;(H%3X zk?DTI)|+Yzn(NXdiOzhGSI0-9b59Y(v4b@aZ#9d&>=gr3nO|FVx<82!5&zSMEVt&5 z2M^%C@keTX?S4$u9bTPzD97aHGN3~9yTjAC{(3>)w^ezNCv(n4@gCBzO8bK)o2?!t#`zXu)o~@jxS9$-2 zqf}Dj3`o9$5kCj7uuiRzi7^W#9X1_z-t6w~P;?|8cu*GZPp~>)%mjm95f-Vc@lEZY zcK@KKcujrF=KO=;Iwl%x3{)`C7#M5sy$A-|( z40I?zc^fiLxBkg&D$yNNpS(x%ddX0Bv%pZ@y6{xw71gK9c% z1Y1E&chgc*Kbf*ddU|GRU0mkBz3*QQis9czv2&LwB*SczgxA*lCVm56XPps4BVZ_}kOmZtu# zDs?#!p$ih%zmde{@Q71T9~{}t({fr4&0d>HepRDlU0=JOp%b#`Z)<5@!Vu|TI*l!u zWDYbqpDN}FobH%T64F<0)+jj5x8^>&OY&cIh-C7LUkVs0)nYD=N^?_hqr6nhz_fFY zv$1@X*=jwxqqoNzDeT92sn4NPp`278ao*cqUk5yVag#W`={^Mw0&M*zg>+ts=JWFr zrurB_PM<-qxW1FF#oYKebzAEgsWFQl5P5gxFP1LRs5GPJLcLRAq9RxW)xxFbSN*CC zq+d+=+@wAL9YCH&OVoOksFGJMA{=icEL^B1FMOJ=q?V>rDc<4<@b5?GT#p-@xJ;$m zHwMRJpo++s#Dqso*VFQj8-yp-VzK5Xt;UHXfWnR+(|aohz&@Y1#}c`xH_7h7w?bs3 zXAm9FF@B^U_==qh;%7|=A#gjcRZ|Q5nfGfES5#E%PJOWOUT%~B+c_{?sr-JGd_$}7=_NCjrkOwN zm0{cI(%_!Nw{rb{LVO52Zb(H&BFsztX#dX`)!(4^d0df4P4LMIGUp~5)C!^^!;J7ys4Ppv`nl>_rWslOq8cG zl{W>A1t!PlP&FGC;tfz0e!4X`J5B;3K2rWIS!w7-d>X(RPQ~EY$#QV~I=sHoYKnVc zhmu`Jt33rH3oE9wvT8N+7ObqDY$%6YQ@~R^I#$K>lo=)hKVs$-NHA<33RHyHnjR*$ ztx!l{69tYIEFwQQrI&ID)xlWP|ss`<1uo`99LC@45(y?aqukeV<*Xqn8E%+vWl0fhA89^!u^* z-5PeD8p6~3b$WhILp@B^QDomo%y3%i3o01i1G_k*E>5Y7^Ye2)bT7O)QhS_-nQY2+ zHiJ4u(d2EM!UoiArpby-^pAfuvVkCF+aI1AK0>dFx_6jGpr#woVCM|V?=DOma1VDI4Lh+0#w03hPr7dYXC|!bkKbq zSogDcM5Xq6XZ=ZX$hJjygx?)t<&&@Z7>*!7z${b&Yiy_&b41SdKY+*D7QAo6;<~xf zPM$n!nKK2aK6$pb_H_N(vyCT@pFUoD_G~>0r28ofBO39v*E=kJ8I)(m5FXk(dh(go zIE}Mm9a*v_=4hgRtu?fCV!_$W8EVHtGybUan2inWBizXbV@Q+LY^hr2B!$&bJf-Vw 
zJgd~92)SOZM{8lNKoLKmWZ>IY6T4}PwL{xGP0$_WxOs1G3G-+WBBVR^6suCNg~x|v zUA8Kx(yRUhT>|9w#tUex1I-AdRa%(pP;No+1SQH}sF*Vn!!^OMSY@2AC?Zxv3tvyqsea3>#io(O>0mS#UW(xSTK zR(0+EdZ}G}y^q#vmV1A}?Om4p*e&BMkUk!1(+hN!K=la>!0 zf0P<8d&3pRp>%HvD6jvSlKy#&UF%LdwXZJbarAwHZ*BbaKWjA`ekYA8$`5&$VAAzJh3xwn%{d3hA|ulOe1=rHs%p<;o>SS z@5eny=*6$nTB#K1J%KqEnr)pZt*6YZ))G;5G!r#5u%sVRswt$Gm@Qs_Sn??)nLiUi z)a~b>fC5@aiZLCm&>aziC$ufJ1drGkN=Si(Gs`}Qor06>NE%uF!{<~-i2%ax@w;?Q zqK%r{xbazSbPd>dU%uWxK-aP*7Qv|np$J+Ix|&hEj>cYALS_uthS zH&{!9xuo@@jJBh$npjFqu@YQgd(!IswEGtxcBXJ2nX$`sR4cD~B;dH4k@6(36@ZC- z^!5ANnLTnOwU}cooR4;Bz&SFRBIou^)*>2xM@sL=01~Q#a6wpKLPK&V)*&p zx!zhEz?uci0VV2eifyMgL7yH~!`ifiUaWpc*4G5mU;SzS?M~gDuztQfv()cjy`Rsv z2!_HTMZt=Sebc}E&1zYmzLb7N<`D1!5rXyG#E<`Ru>Zz=g1;D4WF!g6-=1@=fBQH3 z(XpSX>f3kwhx>y?40x z>#kH$8}Dp@*`(ytZAUDR_OOf`ji?qa>s@wXxWf_pz2J>+vN7n&h`&g$+x@rWpE;v1vn@4WE@7|hgrd71ZnTUEP+qTfrl2fa}N_QWzb5c+p6isAy}!d|ToS3i6wlj>z- zgVJB*z31P4_f)1wu;Od3A0;@K5zVN3s`=VCdv8&@(nuQ;*W?29ZBdU7QraCrQ+saaG&6Z)V`k6sks;V_1GBZ@e zRNA5q=omGXsx?@yR0eLlxwXg~srOdQG)C#r6X=f93%4Y556nk*k&et)Ou@te=ku-3 z3Av@*T#;PORM*m=n=6ub)19|yOWhoo50Ja=2yq;keb-8$qjuu7=xI`GrF;BJ@TW}m zE1v^S=7`X+{!^hP>^z8f!L4A;U9;Kbu&Y*$h?RoffbMcH2j%P*_hC zD(-ZgKd0}QYHf)L!?#*;AnEV6dX~X0i5wKT`S^$`9gAU{Gw5aqT0K%Q5FKojXuKCuJeZOtlik>A~KW^<7xC@bWTX_DQFR8Y5BhbeU$G@_aRv3*%El%QU;{~u> znz_AQTNji-72q}1cj(NG8lw*M9Ae+m3D%-Cc%8LDCp`kwu9m$_l&QhL%Wzm@m&{t@ z`&YYF^-tixck$tc7IsE0TU6=Yd~j9Oy@3M%qP#FYZ~CrEU-@=uJKDpu##YVMk+Z@G zF4W7+g{b0aaZympN#BuoS7sCVj!gIFBGA_nS5!m&IMIFGd$TLrN~z{@a9*9m_^M|&wi5YK1L5}c9A#| zFJc4c;})iCf8Yo&n!e*C?S_bhg<2k>!79B}HI-{{cQ)#ciZaN7ZwRzwl*CN(UNdx1 zU~2;BHn4W=Sxvb4HWN+qGV$8Ipw?H8nYQ44OQz;dED}{nC+OU>Z%g4h^ zG z>)bv{dek*~wBCvh#HR~a8`;IEHg4lt%R!ET?`>=&HZijU(Fvg zyu(QV@eO{n8mX#M`A0)E+x2bbI0~3K~l-aZGDNGzxN6QqXXS@wcrO z?1=kB?noMl*IB>+)1bH>I8dT661K8anptONi1#!^Sl12oeyioA)6KBn(R&_srm|UY zKfetusBwEM?-lsuAw2Jo9(x&ZA_lZo`e1BzTT;5!@hyZ>Ck#y~p`LmihIhLv3bzp}5^Vn(_Ifu_Iu2Tn> zBcTVsusGpKnH}I05embsRFI9SrE(5wlxi!}O9l=vci(rX3$}nHYmQka9Er8kVBWLs 
zb?|s@qllzp(GWZ(e#ejM9N7YAwXk1P7GZ7uzQ2vTG%0Qi#(=KtDs0w2mi|yrr+H89 zSBJn4mEN(`@A$iT$3nYDy5>)n&bgekMyw_qk3nUg_$mpG?Pg8~quLStF3qc%9fP5d z)nv>#m74kmcDhEwEDo%K2k=grxxGF}c6%2Y&{H2WFn6R&M+6&Ww!>+IV+dv~1izZ> z^rw70M+5LXp(ek6N0+^XNCKzD$E;_At2x=XgUr#vF}fEwj-ZR<2Kr>X&!3XiU^(;5 zVx3R7;dkbVz^RrifY=+1fna8QV{LVX99$R9u0^r3qWqz%zD~kB@x||8@7LoD4$*$xr$yp2}4&!!U5rGx1r=k0hcPhwQN*HTNFsVVqDGxyFtu) z$T?F{C7BOlC{)%|IhdVvIPBjv@oKiQnfOb#6t4;v&HR{Z;}}IV9XBs?2+UUHJ&v@Z z@whe?!A2{}x9G<0q*{Zlp)q%Zo9vVLWGzv5yc7iI;(i_JL)~qxVWhKXXl_5F3#ek+ z!+#o7YE%MrICISokXEv41>#{quIe|I&u!!II!lLup@eyh!}}AcL<` z!h^ofdU=X{zRs^Qpr*+!0Vtf_o1&M!PKRyyFV=aR=CGo_nWvf-@)k3U@_(r1!LNTJ zXB(_w{(6JS>hwxY;ahWg53&oXgKly1hXRfHi=bfELIr5-{lnz^oV&z5IJnF*YJQk~ z9Gezr?_csR@Q}8gY88_+oL@t;nVMx}gFx_k)2BeRN=Xj!ZG|$z7=zEfibuH>pSo?a zRt`td!d4ivMTM$pC^z?)!>yk78)YT<>&QFhhWf#TctaA5c;6@7&428^Jlr$X-#4qSZ?g)Bd1acLQI`@(YCXKcl$hqtSbwwv zscGOmC$G%Yds-Cl+sWSGtOGU84WZ!z^_4^WMwdHwU{3Wc!oEv!lZBpFOi`knr9l#W^K& z=HbSpEbAoO18}p#Y<31AUYIjJhVD$=q2cKUW109_I)Fqwdf4Rc_TlcE!vk;362e^A zF?pG*lA*q9V^#(0Q}Jv)kQ$ z^DEkGfXJ>l5$aGb({(x;P&AF`3)aG8iH4JV|F!q#p!?JAU%(*iD*u=d$_7zVKVq6@ zEq#TU&BZl>3fX2THNZ{>#o*?ufMmH`w*>0=>@w*U80^|~hH;@fAn`-Lan5ase;1U! z6#)SnF-rJLz%~bITy$La8FM8E~=L6q3eqdLGNz31I{^OsEGn^)iW$y^d`mPem zs|kY9O_G~(mP1GYiJ?{-{7cHg;OV9N2!BfD2FpO5J;U^&NnUE9a-QOWZDJ#vHM(u? 
zqj^a4p1fAn>6@Y${xa;D3-X_nOelUt3y^;GZDB4c*c@Q~deZZlt0QqZ%tbk&Q6Sq9 zxMYsZ;MqwBTVsAyGRB6}pY>kjd15`79tI~_=Htv|y(|qipBbckahZ))`q_u9ufO4R z@NWa40#j$5Ngvdud2}uza)@h0lcKMOg~TXMF;5T%%rz%Zt>&a{&_-E;&3%6L}}}x5_RVflJgQxtpo*T*ep#> zC{z3i?|@zsf%^50-Eafv%7mJqq;kSZ=N+>xY83iv@6sQ8`G@b#f!WJq3|4zToEpA~%ojGQf8Ki~-kAA{DfvJ_ESGwWP*EIeAB zGt=L!2rXlfTBHk4J)o{+wnuj)YAFZTA-E-YB(J7}d&I+Dp)aPX$#;nvj&; z96}zYRkLQA<%~sBSF=}*3Xfb~Ojs_WAjvPEmov9uuIUW<#klK)_#OHpbK<`5KP`Vs zKu6G2VR!L4HMrqk6ICztwArqLDDt_Us+?CSuPkYA@WF7TJ-T+@fd#B@=q58@uao>r z!doUquhQWVE{5P2Ae!8VI*ZnZRQMb4Zf2;0dgsC4^4(S$S3kk6Cc zs_#f0_q=VEdCTtZzj?V^MZ5j%0(R{lNS80EA<~Id?%`pe?J-*q3=iLZA2}Mrsseoo zm1Zb)Lr9u~iG&*@iM94|C2TcD@aCbU$I}~c-*J0Sfr2*lF33-Y$ZXkZ|Jk6NUj{g> zD$1;RcU9BZ&~WDKI|XXs`-$mM88QelC`>_Q2WlIVAt708!S-WuZ8*76n;r^!o!r%+ zcMHaWE(R<@;up}S?FklZ?lb`o#x@rTnTyE)l8!;pz#%#)3l$Fi#usC^L9eMP*f7 zcef3VK5?V$EQ9Eo_(l*4xD%{LaJ5k^!~`t}nTa4WhkF7t2411Vps+VYjD%!B@^Ug7 z!L$fp1JZ;zpCt$rKjf*0yQ>(_JM8ahVN4NNBnUp`SVoC&_$>)gqc^K&@Z9$E_nBe9 z;3mTwHWS-r2R^-oJxRXTIMo(=Z*OY~=6th%xVs4?hrKU=DGfORusMmXQvkRPfx}D% zVW=#VZXn06aypHHsvLf61aO|8KbsgO&jMF9lBZE^^4C+be6zd3cH ziTF-MX{&h&CrcvH43eIiBt8J@@BuX)V^rHdi4D)|iQWdJx=$>Cp}vLXiRH4Ol>%S| zp-jJ4hB^Q(C?p9Qvc6f~MpKy=Rh*qoMwPda@bX^_#{1?QVs^jzCc@7)TPH-pSkmt9 zDDU$eDXxPAxIvMI)ioFo6~QT9)mz#$`r{7hHatjorX?F?gk*FCCga1ORzt=p=5p%!HRa%j76c6V}d2|EY``UuI>K1Lz}Hp2SJX>EyEF6fb|J73F#6U3a|$a_^4m&l^u zFxxerpm2ng8`K>NNGwm5D@|98&c#=PB|BL;w7ObS@l*Fau*g5Gj%PAPDGT?s%!-y} zbj3j95?Lk~8bC5>(&PEM9fl%yDLQIasO=u?zWa4|r+e^r_a)dr99Vr?FE5Av6z$U$ zVe2JCRaKnHtj@}+DwuxNUu^bD(ppiO#Rs9UzDaIamLxH2E2>qsvyy-2`++$zdxLRd zk9=(Gqg;pBm+<(vdI=GwNqjYxivGgxV3tAuK7eV1Ilj=>PIf+Hur^yo&4^ zmDXTK!vRz6_@5X6GaI{Vl~4SoMITP# z!X)kUYct2J_YMUVl6|A8F8!1~+NxUc$$oKr?nhmjHMyX~KRLXIN^Kp7p{?*G54#0zygY$fv4|=|0=+@FU)}U-E zn8sAtPWo(?CbP3I5v&zozI-6H1Lt`v>==Gb`D`_%T}yA7p@K!n2?{q-tH9~N8;^!srl8y1+5NTKwT%`!T!~Ck-?~JiNb#DFWRa9a+qw3% zy+)0%&6)VRBs;Z}uR+a^ib2?zEvpU$!y~(EZ38Ew+mIwBn{BU8qWZ+DuV@A=i}IKAplT?^+d)JJC|3E=4&MI_L*mB=H+%D{HSksBz&v(JjeKB z=E9(Lj@M6zlqU@5DKOLrdEic# 
zAiwISShh$ufei&g+yD&L9D5QTxVyn$bNg_=rhIqS4U23NhtG{|Q85Um9X=E~r1XX; zA7)}> z*WNZ4Aj#z6C2uF`>FkmXu_;G82;Yb1?L+q)`$*!$t6nBX(QUxFFqU-2Q-Q~*pO<52 zeqxS8dOjwjx-tuF?C~(V_Z9yN2p#9Fq{yABAg@CcsN!+{;L`<`YZ)-dQbq6&Vv`w! zj0#99^1&qA3U4HILDqKWrl~dfOwOM(QZ%GQR`5I6bJ$M&Cq%%Jo*Ig%GkbX9$T;3C z^g*QSmZ}hvGxh|=R_z+zg%;n^@QCc%EcjNN&@!q_JhZPDR~T3y7#umY=Z|82(uLgA z)xhreya39NXhm?rwXIl^1g{jZVInF*2J93(4ZW_`4b3a>heJrVvnJnxW$Pfa-0JA| z8=ZvV+smB2Ii{yH*LsAHA{gZiR5RLF#XJy=JwNvo(jp>Nauka@S3y{E-H;$9I4pUt z9x~PEzx|ske-N2>SRpvD-e)ugPj6~2amVW}#?r_JPTwc` z*PMgDv1U8oziEt9s~t3G=%@adK_(Xt$sFbua+=%=vJi-NLD;0NZ%jWG zVo%R8{4(&x9F8zh`}C@CIE9s2PG=RcWbFj-3u21wBWP0x7h!icW4gFy^!s8pAm(5g zmr6w1a1PB|Z;H+Sj)Q%dhj_-;g~v9$4^!mRs$$L6vX^JZWaga3OQ3YJ-rXj!RGExF z)U4|-uv~x<<89sU^%8S6Pr?0FvlMLU=VXA)L>rEpd0{rmtaeE*L+3f2j{?HP-0EtXEZZOQLN=ixF7^Z*V`iKglX2_mWE3^saMh+Q)K&q|X zJqLenAYH{a2A=xAlW=mQd>r;jCPSg2#Es|-P;!noJqCBO<8H;CA?Q+>H`GhFJb>>F z3^SCEJ&3ELH3QVP^&+tTjB>tE2rHJJbIAET2}liEjK~7veMrXFh4?IsQ96XEG#BF3 zFtp+`?80@xdBFD6Nocxf7EEM3x@}OYAW5*$oQ$mjV6`#K1sa!NZzOUbt{S+n;XuUP z=Gs|?`Ul9#%)uY#W_-!Sg&86ymIP1$Zu+xfx!)8!6OcojjxdFT8L18J1O-&j5zGYw z_+J+)2?`X3LPJqanTq<8>}_M}JX)K{OLF?eYK9XUtnkhSVi1cH-=nlmPo5u{>k6Z( zvy0T-7F;PuSJlA*H5oQ@4Iv5GI2>8IM{poX0-}%JCStDe235LLqH}kLwont9jtpJ{ z2@+e{aaDBEAkn57ztUmgblGfyY$edNn?fN5>9C@FhgVYR5tjy1J3N_~{KF%3O^N+W zkw6j2TRfvC+~zc4DYm}9Jg3t@ECcmVr_@hF z6AVn^$1!lIWal4@&t-e!(o2Z)0CWUO1Y{Md7)=EUa)FXpD=3X*7e@Q7~A$Yux6wok!TB zx?@0|U==SwkhcdQ0uvb*b=_2|GHRv`C9pdyE97pW6W5Gkxd2P6Iv3@}4^YHbk-(I7O10s2L2780e1os)Zx%(;pui4-;mMz zL->x}L+}&T!6#m{5Bsk{fZG&{2gmD)g$o3Q0&6$HDT3#sB2eY@!@M-b+xj}xS||S9 z@xFOdG$o*MFaHCKlP$Tp|NjG;@YL`(dbCskSbY`ZfFtnt3JF&Pw;8>pJ^c_W^B~mS z3OgjCv?4ByPH*6RVkZ#CvP{o23@(Qz%oGyI^!fR=$B}9dZ&l3=BsjYNSRg9o=U07+ zz0--`c;3fgTC6LSt$G+JZL?w6)EW>5sB zdfOE#Fl1u{H@mn&0@RQBpGJNgM9BCO9*f2GEi>?(gu9wzIyf}Plr z>Ifd7xrm-X5`{6b%h%*3_jZ(#ahiAqMS^cs!|RamTDO}^H-5g-C9$;jhbqc(%~+m~ z;C#cW)L?ErdlEs9K;l2bXG$0CY*Ra|JVY_0IkXUP=j=v+qx=xnYkT)6rjKp%eQWgg*80f-`U?JA&Z3G>VX3zITM~{6w-pCE?8Zs!JK9C 
zKFfxjEe=dkTzmsGtOlcfT$2PdqbG90ic=d1uZ=W=;eb9Ac@L=x-xGwB7K9pRsAvoDb@tVw?eCA5$$#TqAJ6FL_)&yTmNBb-SO^na7Nc|z9(BQt>mK}B}v$9saKG|Y&Q7*Uw_($gcWOAY;2x&v2nqCBo3^tBoG zeISf$O56nNc**%%>-d#cs9nd(12RhVN4K}M;fQ_rEY`r*LvxNwkl-Eaa~(gnf}!xpCu7mM+7~wJ49xV7lt9hM?7NcPQ3teCdus2ImUYD zA|GE)PCI$Aif`&_uQ)4L$HkDMAFrgtymL9e>i=P+oON9Q;W&warVcM7-pEbTd7#?} zD~nt#-CFU2+X;Qxss?RR`871)*k8Oz{oN2yXsH*15KDAgL77*>01msfkHQx~%R1Mq*0yWZ zd{6*+hWfG6+OMP)#+Ksr_is8FFd~&8=jAkCsJOdF^?V3E$hbBI8-7u@RE3nb(?*}L= z?MRw5T~`T84Xs!`4x99nik8a8G%v`dMNwSD-bj_}2x+dk5)jFKl1OK+M8*7Zh3JBZe=pr~I=!FX?Em>qu$+N6 zFtOC`Z|BTsix4f0g1^;wtDn?GIplNq=HlmeHan4PzZ!w6a%=E^VOjNtGhSFgFeK$nFf(%uvUL}TQn zQ8veBZ!(?Lj^cc(g44dap5SS$eKo|x-LFh9K4p`J*WN`3!m&}DCMm<4Abf45^xx|fS7zZPf*a0w+ zJ+f{Oy3vCku7hze>d)wNs70}mmS zwKNDHJj7;;n-46MF^9YT3v0Au8%mf--S@hS9>M`Lw+DL(|Hh*mx*FT4?>tZQKI(lj zs1N9PS5QsrLJL%SfPX$xte!B;8t>*Ena8cc)8n(tq?vJ;GW998flOBfBY7lPU6tEN z67TQ+)zb75QSp0@UOz`KCg^2=fhxc;o-L)~AmJG)!O$Jb0Q&+|XXg6^-exkH!E_f4 zJ9BhDKzyrmXx@YIIRgmIwZ;bj!xi`%Wfq*_68yI*OfC}>&mD{XZ_+LKO_6K0d6kj2>1JAn9~F2(ihuP3?}9|4NU88C#ji3J2^Ag-CP_~OjzvB zY&kSw@OxrzZB+rzVj%9^9gPgaYq%Loa8D4{X7JI1Pxh-38U!Ksw#(6mQP{!OoRgx_ zo~vm&{`GY}pgeuq$FmF#2q^u)DC=!mu@rb#T}j1W_Q7n{hkR5FQ261+&9PaEAb3!) zN;sMBCW42ssb!U~osebq{hZNOiwum9!rk=DwM?Zw1A>qEgw6&4L9W9FG|kno%hijv zr}Vdy2e2`g-XA_Z-ZF?flq}#t9nNgl*t2{8YhLtI6`6t`1FC*L0)phQcv}q778)Kt z(FPl|G?Kh+_9!*Vq_~DW7h+LKr_P>3iw&^R?GhP%SR5cLFnttQ20uZ`39&hU7<0=LNfQ;+$>bNb`F- zxwyE|xUrOH(>W+Pi_m6+ z<{j={WQ$uo1`e@#=ON;^Z^}3*J5<+Pst}@s>mEUV&>z|f+r6qX0FTzb97dhZ3|#b! 
z)3i^#K(dp8ZKZi~GB~T|6@hpKCsc)ltwPqA&TsC{L)|}N1sAUAg8%l_;9P2db?B(z zS>&Jg+t%Wsb`zTG0h|}U&5b9K*YMH$ztn38a;5FjjECfLi_hR37+p(|;vG)CWWki8 zw<{|DYbQ>Sx9CC8Jw|Tb#=q3g%}K}5zWoARH9bYVfvlE_?4Xj=Ibo{`4Cy)q+hYuV zD$e^b0#Kk%0AqXgqbMYvKrjfgu%)?X*721^|3fA$|B?5|9XG*dZgX`tJG)9(II#{f z6P;poLB$*2Zmd4-tgZf`%+5F%gi`2@3wlahECH}S`$7?=pgiCAuz#=wnbChs(q~%& zJYxP?b3aQVeB3j2BEd`8o&1Cb)t#Rxz*#!S262ix- z?O?;K9o85(59=KHRmijmWtPmJN9rWcHNb^goeZwh^1Wxi9>^S*#L*3@_~+3hs-};e zC{TLRvld<8;KFY6mBpbi91aG`wHVAA&@kL*T9@bsfEhNnM2BK zy2ttTM~@uh$6xLcFL~Z0lR;`QR6JP72^14oi`f#b?LMZo0}+`M=*PD3mG#$vjws%h z3jT3DN{5|zeLH~fGhmB@{dW!Z2>`L4atZNvpNLEZZQFJ09RLeNYl2$3W#BE z?{_F@Qjyc}thb{3}Q&0A>8&I6nwqG?mKf zu^By!e<|~p62zb9fQq9Tq*$=}7&-Qjf4J3{-!@)ML$rLCdD&<+@58IWOd|*7+b@hZ z$dl6$4TA2$G8SRD0;a7ULn;!g_RFD#n8PL~AS^mr;@pj7C3iwwZpD%iB7@p5TLdFn zAI(n+dA)z0bm2I?5Id<~dYg!% z6H1Iuy=5O$kQMR*4Chi1;M6Rmf&-cB~!$)n@>s&_ksq8RGB zIHP!F6!%4{Dop9sufCE5#0Q)m!iDa$5=FVZ_ri>N5|LB?IW6XvmZG*(PVceY?Pld` z?0mVcNqe!$G63bu9q{8j0PXCnQE_F?9O{3ng|5Nbr8%+LzL{l?T(bAKxrVDs_1-*d zf%^UF3TMnsUPI_j1Kn~=DtOetzausFJF9=kF!PzU<{N(;BS9+Xb2!9vu*+Rj6(6Y0A$HSW}hUa;HBqEPE(6t*q{fl5&nI&#>v?J;JIr_ zFVv=loolXFeI&dtp_@mQTtC^>@TKl-t1?ETp$wp?@ZodU5N7z$`!npqtG({mUY}lj z!>_$@>@5-BYYe|JDj|3tuNQpYiKJgMc(c_`*1v7J!*?;Qfx)tm!v?3*8a!?%-%100 zkQ<|m)3gb}qDK%mx(&ZB?5|V%i%$sr-4X=pA+~=xOCLrxzZ?5RjeYKDI1*Z_M5$;z zi%?Vi7vuFF*?L~$!}1_IGl#OK(c=xltL31U7^bR*)nF=XBSL$QUGhWRi6*I>|D~x(yw$%%8|sc=T2Mewnq} z1Jkq+ZZm{jLe;iE@wOd#LAxny*eYY{IS2DbWrq}3ZoC9X+K|`!Xxi31& z0r>jCl`|E*Y&zR23zc)V!~idVDTfkuqMzXvyy6MlE}XF0b}4?3ATk^f0t5%dfxwjr51xom5LVk z_egJOB_ZwSNkSV-I~!a~>k3W5H5&RRct=jtm10y@vzVq_DqUK+SZ*BhGmas3Z7gn; z10Q{oSSFKwta_KzMXS@OI3Ea#1H~ z0H$Cwf8cN@ob(*N!z%%q4536YJUOw#kU46_UT(k zEGqLsO10v@vrtyE80uI6W9>o>SbIZU*M9>SYp)j$MhBfnka}RQrP+-0f{l!=uAjim-|%7mU-6wQfb-cpgu+;`{yb ze(}qTya7a=Qt2b3c6`8t&&#VX!hugY#dK~h;Z$DcR}z#B{xgGigetGhTSnz2QH{#* zqvWYQe;y|=b^H{4aUj++C2`}h21rvAIn6Dq3CJpOH@wi-jh7ry%^@x3&5BWJW2(Uv zK!s!-n@dq98x1VIY%>Yzs^6Ift zV9S;yc&HD{;Lu6PJ3&5M)dsq*|7lA_s|dfuo|-RcbbL3N@&e`t7`Lm|%HZ+0i}}*n 
ze!HTGj2kNtrWz}AAB*}60pjQ);;3KW`se8U9*gulTG(-EVjhmNYmaHY(el&9hIk}H zFcVl0Q1dztrv^p-=_wa9^HN>(5G4<0VIaz8IXT5hJ#Cs6{+6YR9g+Qjl4v&snhW_K*#RoPH3v`^j>-s9bBZ7<%Q39E zp+r$`0Fo>vc7(lL20dmt7X+2e-iwOJ1_*Q(n`(mIbXCM?GhY?&ZPWb81_=&0nR$@2 z5pbECOn4>N27Xk4p=B^lV16+fxiUCrW|~MUD&X?`N2D!O)x?Nle-dI-! z_j^g`bdKx24Gymq{mR_&gE7!$mNm17<2qim>9iE}-T7pg__IwN-iog7>g-a*Sy6wN z#vcVAeeWFLnF1!`1GKU&VLB(mOrJvlViZ>HaYTJ{QaSqISls!`j5D>6Y*8eB@QH%$ z_Baka(80aQ$7G}P5m|W>dblz&{T~#D_ZVE5yKq^6{I*}DU>FU7#YQD{bPNMDD3fXC5JzT9zL4i{S0!(uVO#r^kVhH1{i!L;#$rUjDphajwYN5uF(ptH9QR8 zbeh<%lzpW_QrRHz2wh4B5@x$fzE9TRYP#G~YsZL7w7;bHaT7xQnEo1~eKmI>=o@V# z+*cdMpQ>?vgFo%O3{VbKy@qXVA2SuusM&QfXI!>c#;DT35P_92IP%R8$M{mk@Y2Hs zpMn&R=9ZjW`7OU0P=<=+eAr0jw>I~V?5AXVm1)_{A9~@<-R#1r zo>E>&Ge50z7cbEBX>Tw!cfcfRFO>#UaD8|Ehf370 zNO!X--lJvQSkuGuaR(%jO&0^abH<-qKK!|NnHu8Od8lhMmOyU#m;iKVO>+buwGkD) zi!v~~^45=tDsj}+<{;%DHED~&p!3lODF{$#0}<-!2Anu{zIjJtTiz9He1O_3aQt$T z2@bc%8#k@{IAhUCJIsTtPDOlSS5t}gXI9mW;$6W3v2Wg`_}p-)8?uUK?mF@*jjTGd z6Q~)rrP+LUZOD!`;9sidw1}LjmZeL$7Bl*wr3zldsVx0n9iU?>tH=LZUbHTVp7{xx zHOAY$55TSU>N(UW3x@}xQ&C=`;$io1leAy&J@I61VP5+x9q#1i5G(=?3q(ov!#rLv z*vTUt1_Ln99^>(1V>~uvK$P)W-7mNyPr6;hSfWzk6$wVCjPdPksliJ+K;lbcjxZ_B zEkj2Mg3vp}Svq6DH{dwED1Yk|PP3gmjE#&V;qBGG%V}86A2( zsR>2lLuME!Df!gtE2UHi4$*P^RBsDke8;L2xBq9d6?$u3!y!;54pGX2s5#jkYH;M} zttTtM$cLzAG|Xk*O1^)dthJDU$_IuKBN@kB%ft!fE!~(`nzN4{0<^lkCHc1s*^U@- zP;mM4^epXV6t6SDC}38VkbxdM)1g&X!6}>!?{W&hB1v#4+ss{oqYP@ZBsbh9kyXte z-*vD!N1mqzt{h0YaEdng$*ZT2Q1A+}pka~eMlEAVxl!yC$>~xr1jJE1luQm4yP$3Z zIH4^LnWq)i^(2O>yQDzVX(D%~n%6b4NqNROa-xN1D>)5N*tNLtn0`^QVaN6_RRwdgVSYbIGz&{PL<>T2egbYjI(<$% zpao=gd5S zlg9NyF&;Fg`qrE}fvzd2A}ocMVg8$$ z|D6mC&Ic=Nh|&)^uLckn&^&I2-_85rz)+rLgLIS^WhiIid1^5WarmD@h+;SdVU|s& zU-24$+{OYQJ-@F-GA{ zSy(tyxI7PCs^!(9QMbanDr%8I?{hD*hFYG!POrMZ&H&fjbw=I*Szq;&U00>Ixz`nt z&ePTt4C1XPd|!DvA%((C;jg+65|LEWv`W5M^Y8Bzf8SKh)#zhE%dMjVAb z@=@5MX(;TGkHQGz8($$+K#<$$z#p!H{>1fy-cf0@scMA1rz6VZTjFD5$MK6@qj>d7 zFTpI-b7m(~H-h~t^eKGT^U~%1%)BppZhlvDpSBtXz5c*e2CIWMSEzuyW?{j%3fOIT 
zUwCPHu*lM0V+Jt#u9v3ED{Bis>c#mTe&NOG!Q2cDSu;xanfiHN&%0tb?c}Hdmkc~o z{PWEv-U-#UDy1P<8pomuko=IZeT0R6etF} z7Q4D(9Oo40n;NdP5jfpC%Bor6phXtbR0fSG$jO74+ogS#C6~}9WNE^l8||f_HL}K6 z!^zmPVvW+vyo(GckmAe_s6uGUx?B z?)2bDp$AC6Boprlb1CUkS%!UzewT`gWKVd3A*BWT(QpfWI`uE<^Jek`1=k4AZcEP( z#SlGbUtWadg=k}{cVjbF*!JrOXLGZr+-qfz-e&N()B z>@TQW^O{5u%Vw;Qz<%5I(BJ2XXcRDkTNQ1vpv8PkkBC7!s01u@lHffwjv zL88N*5F)`Zhq;pNP3+x^K5c&B%-cBt!l0i8j|UM*e5R$JMwX7&3*h2_%dg7V0yFhb ziG0|!zgUmG07y6#_zZ|d0OH~f)tws&A@;8Wy1R!)X8a^)7W1>?7%$1CwX8q0b`CXL zvnYZ;nswdVv(A5g1v(AK)kovY#YS92h5R{@yCzufhS~j=3awYKU3`!VF&0P4J`ZwV zj2r>q92nW>cdA^?OvDof)>HsI2$%ku)_NB_5~AKZG<)B>96ulJslbce(k~dZ@{9YN zut4EMHpy~)PHMXe?3Jshp&5#$Lm9WvoSOgnc%yWr zAiC1xY%Q-~`MxTON}mI{biG(FCJt0`Ewg{`CWHOKnX=-SZ&$9F`H1-**J{EAUIlBN zdxvB!Fo$^Ujdyjo{kB%Sa7^8*!U%>B5QdOFGXj@pFpIVZfjP=PK!VCv#N&r%Pkazd zEP0V}P3_Q|`EWjC+XddlT1*@Dg}Bo2(Z@8D3^F(Z^YW_@Vz&`(d>ulqyLUnyDRE{< z7tOfD)9IBeny#D412u;iNiF42!D*CV;PcWE5dgO9)^q&j%}&2uVB~)SHA1%o+&fqV zI?WpL=`>2j3~Er2GyOUUbhFcFhIv{BO`7@D)dZc)TC##jC!R8i&%;7)+?rFg8PNt>x;em$ZV9zYtI~B}55ABf9Py&3KLWxmI6Y ztJICCg015gJ>A>mpR=?aheQ&G#~NpfIHFPkW#xk-WGbILx~B5AmK3vFbcsrySNW(M zgI!tk9DcF!DgNFM&XP>sRiyV=P2Ov_V71lBftDTbW~WJ zD!Gk-_Dyz;-?s-n^Ap$I^k6wZRIHU{6%-wc$yDwS+O{x-=5 zhy%-d1AGVFVHG;w`_a@5>wz4BZwOlktA?lsW(E7|ffbOw2}3vnHN3Ugo`SNrqCM5L z)YYdC#LJ=B% z+dC&oOc7Xlkp_4KAX;@*Kbc&BRk4sw7|$Nyf*@XzkgFE4e!1 zW$j_##WzLJ=`(iLca|HsQINRA%c8y(Rpnmn9b#5wb;~T8Ez}F@K zlIb%neQV|GrWaTFm{RTagn2;4dTtWKeKFFzR@JABwl9Eh`Z=$SrKgH>=heD`2o_JSx2*$w5$wA7 zMn5dO5fr_Yc~)};rhoPZp<198n&-jN&^-T6KL)SGnZptl@wI&l{+9j{?)S9`_iyuv z?njm0(@Y5BYva&w30U3x-!kCJqv%ip?!ek;riD>=LrUx^#30VMfD&{AKfo7z=ZVc2 z5&8{uH(V+6AX z=DzKdsO^b(HYvFW{hG0D|GHO(BH zSGh*^-iQ9pdt414s^LsTV(V?hK#G2m&e#h&$G(_m^OAh^qc8pMMY@*O@$U1zo%|D;l$;fbo2$X6*FaxZ#qbGM9Rn?HdU)&ef}NfKU0WPOZ3q$Js1=&Qf6-3YtMnqNdC5t0~$lWZw(;4Q7=xu;XdUw`$epXB zzV~cRx&q{Jt`sMn#w{LuqSI!QP2Z%+58B4#u>Eb|%%UbXk||}{?G#WsclNgZHh0MB zVDKYQ*KEKYbQ5ONYY0wF&Zdi9k7a?vGwf0?vQtEf*xWBB8mYx)|^i2V9v7XKLY;sAw;=<)FO>*4U1b4VWy<^zWtoO3$6lH1*U 
zPzk`Rp=+@$AB8fib-KK%4w@^`vE%&C`WC+;PHZ6CJ zG&_pC+6(&ejv(mBcOE!vZT$}02wJw0l;4gszasOoTNB*vC-YG{hGTX*pkt<}A(t=4 zTVa_eF)zDP89_`J0zi!^|_$5(@`l8&UWza=|^;pNHv5Nvt?P?hHgs7m4 zb>C|t1HqIvN%X!lboe1>7pTX8dvoL%V3fE2FF|xg3ao$QHcE-l9HV zW`fJD%fC1oTGzV!i*y~e@LyqdwWn1Nm-^6Mg@yl0i7~0W@h18CMRDZNU+$$N&M8sB zBSLrx6gxgzS!%(e)Z}_fA{%+p%1#XdmFX9oj0W3t?XUTH@D`;#Ep`D6l%=1 z-@XfXjy)x5{E!1#rCfw#7Z2GNGi70Y%$wL^mE#^ygosKN%bf!`pdMPX!- zRC<8bVKAoS{8R!pjnW0%O{U4d&fh-SQ=-C${YsO?;}C=}V)ZFj5;VB)my5zh>HL*G9)`MSo{Hbli^?DM*)kKW-dJ@6aS@L=; zu}g}yD&ty@q{WY8W}KIKsJiR`7BxM5xYP^U>OA*#o#*^nPJCiCL=C^Mhd2tB_XKtm zVGz4_j@e25$%YTt@LEj=zlRSj^wU=_1Mkt%5)W+&iC5nQd@mZxys_%X7W^L)bGixm zPdYJtUB$x{A`&g}NCT@Qv!#h?guEZt?MG<`-3u}|47@jwj=(!pXM!IB>1wLpKmac$nthyA*(uJrNb1RZ8TMttzSQydW_50-n)vY*g-q98#UgjV3 z(8gheDWO9;OkuWojkQMO-)!)#nC@yff=9kCdpqop!kF&Y9X{0Ys9kBUOCjzLXFXRB z5Y>TSQ2(%ogjkXBddb_8<7Tu%|3FMEK_o`Wb4+TO#SXB!o#4iIM_nd7|F^9D8LRux59n3u*G=l)TlJ7;NmheFWY zaZ)x~;_1}qrUqcEJpJu1D!)J>;zzeuD2P zpjSI7`mhZ1C0T+ag0wKJLQo>-&p<+*ri&_sBr^;$uyxPIZm3UY_h@asCN1<_9#-*s zKFyW^2l?F_RWiID{_Iu==X&r9bMum@r^^rcT$o9%;SmhGk{?PKOXd*k6+xowI!4gz zHBq2jiM(lV2Nm`{R;+hqZWIXM9crdVpxJ)`@FqXIQCb96!9bhpC|n8S4aN|3s6>54%S%8SvdjS1Iw__pMl=}UO6M3GjqbFG9&>?bmctUF z#H1qW0kv(>q+$C<5u{4A>nI||LO|bGenDdC;=U0%^-I!AenLA4rp)}htyBe`sN=?` z1xs+H+!UH>G2B53Yt2R#DaeIHF6r_xDM98uJmFg89mPJ+!=M=5aXs2sWI>YZtKJk} zs{whm=WRzEt>IUV33ODRFg{(mX-7X@o9`s!eN?O$EnvMb7i=ZH9PoR`FUBzcV5YP8 z`;r1q9%R|rikXCA4ZDW+%k7>cjV;0b>MZ*m8Ay|f>J8R<_mF<#8!SZn7^ZvD=`g+9 zi0VP(Ee&Y6h#|dQREzTGH|mtpF0ETGITq9ffXW#`JrA-8NloKO<-d%sajEm{m1W*< zr92FE=W#{Z{VOW$2pCwzY~g$WH>iMAp*EYQD3xYg_}XH*!tv2}oh23kW;nfC{zmiF zu`O2zkOq;A%cIPJGmUa~Y5%-?8*S1mriE|AGHnbwA1H0uK#Y>TVOV2BKs0~%hr6wg zwc@i&IL!p>)yf6&{zH~c_%f<24mpS|={`V40!}cJ2Y!4I{8rZ@9rkdW6lW+26gk}) z%F9W5XLtbX;&18>%|+ZZ<}Od5 z;5hZOME^Av97>WFHwK6GjQ)-yl_R@Yo5>W=4GdqWz-Y2&~J%!)bO%SM|gyr;(0qy1oy}V!{;4U7A?#@7x7-py0@J>3(dndEe z@PAKPuVyqpXo%)`Ql^r~z+zlm#!4^eI!+dn;LK`Zhcyhd^oU1Ty+g5yYl3u{^km!pF&DQ&8 z%Yn8X%0>31yBlvW>jv$$Izy`u!t9l9wF6X3qziII-mZgxF@`V#SrVJRCOdeUPMTN& 
zUF=OvK2gpX8<8f*@J+^9P&kMyu^-p*fYaDM z2N|EATZMh^Mnt@TRiRrKzxAGvXNAm6lw;99KiDS;ZoCJ<#-t?|&SeL0p*%te;3smx z8_r!BMtPSj>8mnDCYp3T(928uw{2>453wK(Cks8e{h;H3glq~26M$;r6B-Fs zTwJL1<}#uJ6e+(h$rr{PlX?wvQ>pg@(v^R==!@igj}-kA#~gbVDfloV{+jWGujP2_ zpI=!Vx*P1Vn9y0@L)Vm*9sgs~(MxV=zfE!lPN;iwbm?7y<$sHfK>YdI7^~>S<2xrDt&g>WIMieWbpoVuFRrh4!l4!8 z)ek7pvmts(8tZ`r$0NRU+n1z5#DDpn7yINLGyC8(4R9j>pW-QJi0;tt0NKC**k%Sk z_sY!*YCoJ?vA=M06cHysVLM$;ysB@pn4Kr~%Ne3nPU77gV!&c(O!tCLuP@Cd3ko)A z3DA!WtJRc6{)n4J2mP-@Uc<2E8pf^{bgI!D^}I4Yc#Wxg3^D^L=M%(# zz#yF=x~AUOdqZc!wa+F7_HEAm05HtWTg?2MUSh1*df#rPtqyMQ^x$9`&4zS=7{H$P zc3A-sX9*l+M}Z(kA!hmEFpC3GmcW%DZbqRnLty5EyokbJ2E!c0@(&~rr1EGsqzl9# zj{*#G&5yPZ$&#NRNAvSz$sZPnAO1BjX7PTwT!AhIizpnV3q*7sbrlchvsp2YLQoDv z0o^!Fqf5;>3Pqli;$RcrY6O?Z$A;+wQN$M-(-mSE2N=5i<7qLUGyqW^L&ENi;wPqi z2y`XY1ciA5If%jxBsYup8Im=hEREwZlqGOS(K&`>$5YOj2==n5?Tbv_Mz;mrwzXe1xfJw$wVI7grw z_uq-xeJAmHv<{Hqqxq1|5K&qzlsG?vgIRQ~IY*#I(X9f>l0Tlzqi9GO3KQM6kj(iX z4x&>E$&#Nfj9rC`}m*Gftxv6OtvrVDEuZAjxA$ zK1Iji#PJ+MMHgE+Trrt7?l8@BkPXzaFqE;F&l(V&a~LYxoU}uVkZ3+;2n-z78VImt z30wn=&o7Rf#81s*$bZE^{>6FI=y0Yoo@sP8rZN-L`9W0j!gPj+c20T)C#-1Om@W{F zxMVS@Wbd6(u@Hfhb3yCTYA=kS-9jX)%tHUd~ad zMf_wy83HqpO7xJd`6D1J(Z!N1g^MqV?jv%=8H1n(oMd|ZQXftZ_n$Ap` zCy*$NM8PmaU{0a{Cz{*9>}Q_a&+<4A#dL+3HOvcU2uzf?&Ze3RXX`isWH3yW83-qs zN0D|e7~6tFa4QCY=>&oI7|~N3WiZTH{2+D4C#f5BeYpFNztZ_;+HjoZ966y3^U3Qp zzCN0#=Ype#L&1?ylWYhD;s;mBVz}sbQ#ogp9>;|x%y5!H6a>>fL^}QJG#%vTEm;-T zL#Q-RHb?{I2jwmCnVPs+5|923K$sNfR9rh$cckV@kb5t?Quk>LPQJq^{H5u5A+hZ<$tpuZZs^P5IMzq| z@MX|{X|%c3GmPKNhl~2CY`p4VtO_lil1*2zVgsda@|e;#)%qQ|K&j!Nlxl}ncIsDa zOJk3%kmuh<49ndNm@T45Yj8t*aulD+{w{~~?dw+Yr+kf?@|DUhk_J4Z* z;zj?-PXF2SH#<*TjcmA5aUBzuhVNQo!f!!FYAe@Td)Rx_TWi%;>(SEi5-XYKD_q=b z_-tM(&BP!2J&JnUa>OKF=#N)%60J{{n>|T=7xdp`gqCyMxTjCslN0Edgk*a@#)FvpkuJpv##u4_z%lR0(CF__ z&pN}aA(7Y*9Tp>JQzVu|d0Ec0^6umN5ANNNcj@?knXn+Qw03rqjmg_1wTgbE$#XRN z-N0AJmu_=19}e$6UVHpU6?JQq+vMEqf%h;!ERK(}0pi88G#}2VUf5PQre%pw-pTv| zM?-O5OL1OF6XiTuqE#0^<)AQz37{rAp5O=h7yy2n|12(r$i9<&oT5Xf6ZDT=pvTvW 
zeViIkV)eUNI~us+engAFgnZ2gxE(7i9ey((qxMOE(s+38$r&rQaOV&sgJlC-iv~Lw z%7#m_-f=Ivg$IUP-Q<=W0O(J9BDfW+!$mx#FuL>~aW~+jF_NWgFni&<79EuszK|Ut zO@T|Hk>&Wro{UTz#`V1#5)QJ1`SJ6orXt@N=hye5I*rkdH$FrFs&P+NukDtxE6dPz z^V$t(r>n#1Y6m-N$HsAMv?`iKB)3|15J5^4tMQg?Y4fCjY|wG?^?g3~U5cZO%=V*W5K#gWgT==z;~mQr+0U)2%pFy=)Z>~eB~1<%l3Ji5*r_#Y>d^@Q ze@veHk^7-b`>|mGgaOAHF~2!5K^XX~RXQ$bYdgrGOF23cDv3Z837 z+Bdl>m@)ptxJxH4CaB>U9J=~qA^;G8Fg09p?2sv>xFYn%{D?G`~9Py5ZvA%v{IcC0t}%PuVFZR%bhQS^<53NMoUyjOr-Vk)k9J7Cg=5pz96@#OFf-V zND^P6Y%(gw`HWnVqiZyVBsm2L0gyqa`uMN_l1iijYmKPVnvw1iR%br)7R1xH6<;jYE(ecJR zfKRpvYHKinMVXJDES^8r2EtDGr->>#eWv!eIYMsG4Q-=g9?t#JU6p<*|3VA`f*~vn zdD-uAvw310U{*IWsQSJVhg&z~(O*p?Q5W&Ph*JB}D(c_Yp|Du4)PD3QQEIzgVfEQA zaU-nk{{>+0_=>uu{Q0ovqc7V*0Gf zz2al7|FV}kwTQFvOQI{@{p{|%-g>jO_X#9EAMkbPu=PBr0;f?Ig)%B!cZ z){{N-#vNla?K6*_?{L(8Ahz{ zq9ZBJ4k>0BNzT}h2ruygAk<*EAPf)ssG!bo&=EUE3c9X5w z&&e@A`P^_*_=i8Js>-L4mRjCuAb`seUp|4(0eZ0WaRRh5pE1THj%$TmvnRo|gW5Xk z)&nm&N4^?-CYYfkINTl!xs}8vQ`(o!ZkPy`kkgwL9ACWs-J`(2!?m5Q+|+9@yt~^i zo?(DHa|MRIM8jNk&3%+sih7T5>YFav);};%@c6X&kn!$AH!-p`iR9PZ!d1QgycJhX z6Lz_d5Aj&m6i1SU$cWB!DeYnrcLFTFx!gGiPd3u2`2n%8ibILI0}fn5<08UMT-QxQ z-Eok6mrmN>O^1%&pMt*|W7IVF?7#k+`1AvGeUy3~A>KO;hq_+r0p4%p%`QmFB%zTr z55(o3H<)A(6zT)+TpKq|YB)D+nK7l(9iC83l$(7NfD3eCh}(S`ujny58NpymCI(}A z-!#g9mQ7FOgGdLk29__M<+yx6hYP8=D*hf$y%}@G9H>?su{p z2vO(wI?=kg4gPS%nFeq2BVaXh4@Z*op?aUp;;iHa_xqYA0HuA;-*4B+xx?#wD5Sg6VL072hc~48X0e=q zjA2TUg6MDAR@=_UAVrO#U$)1AAgISoza@jJy~D$0V>6wHaays)?Y7VvpF23|io0v= zoI9Q;fA(0z;cX%D_HD13J`}%q3iU33uUcE}Ja#)AN5dijKXrQPU=nX{E~iRe*T6E0 zZe@sMe_R(=iSSwBY`?mFK?hgbHR4t01CahMp^B8W1}Cid#c^a4+;ZsK8%s=*Ja zIbI}}c6NgDXZvOw>otAo;pDVCogRo1S$L^qHZi>)m2+g zmW&6ar3~d3ZwSIsNsvnI;~i^Lmv1Mn{a!2Vlf6r`e_!<)sd@#*`(dyq@?ds zST?6ypdBG-jwT~#64glE>&;Ejs%O)bBFVQoufsC23!cLgF7)jVh zQA`#duoc^>SKtyP?p=!^U?bxQ?RQgi`!d9S9H;C}NNnU_sE?26&~-Vh*Tfy7WuK&; zf2FRACHfdyIdRx9G3z1v(k8JQ%?>(DgR5P{w%!SIcfLCr% zz5g!qaU1F;$HlF4bYdJf({+;{yASwo?d#~5WkTnAypi*crHxkYvL^)Z6TJJe^B!29=6g=dc9s}|8j+f+0pEEI?i0CFRk~0 
z`xQ#RF9HU8ApyC<8@-A4QEvNmFmlU)`n#!7;*hK&R|D#-yPOpixbtv6nh!M|oR5de z?1RI`eaoju)j{!$9_6HC9%iS2ZdY(l%&2yu#%s?hy5sN2~cAoZMzIyuh#ZLcTzfaoa5^8L^Pr@56sCqu8O`|Pv@6hQ) zimq?Pha_~GEh9IRFq-$TznZ`kGuXiyu79WnSz|4T3vX9ZgYZF~?*lhI6skddJ#_Cd z7BDP?_$6KO0Wre20lbb}gnW{tk|yC!zJs4=lgYXCYXihe{69rTg4>{2-4SP&XP!5pV@qswoF)BzuN(F+K!JIWFcUhAbwUCx#>A!yF@KrsFZ1N)-H7 zh{z4>jDbyLV@HG9Q0%p)W~f0AcWBiWVK=K>X0PnkQmkYbFIr zyWYd#>jcyDt`H=SQ+|+nQy8{+2qxWGvmmDD@>C_wouS5$&B`a@FbSNCN%==Em8icSRX0L8+MNQzDR>(5jP|L{6Z zQ~q6FHk#kplu{-Ao`~?-Vi(6-;TIkTvGNd688!f-Y&M{^>beOE4}m;+n~ACV5>;3| zW8ZDyP1&oOuo1}eDFvlRZ{a4{Psi5l=gwaReu`x4QmOv#ItXpXL_^l3HgDVT%pz0O zuL4Cheww7cHmioA9!v;YpJN-7rhH~Q7^r{u;r_C-csbtn?pAs$0Vd_yk;YGTS6y8m zK&Kt9OUEsNPKmIv_AlpMp6;$0jGt%M$x8P*8mkKOmhUfhWBz&gzHsGsJ{rx*-B<@@ zfJogqVYuBe0N57wWSbv~lqUXVEmN>Wes$e!1Yrg0L53xxf@@MseVu%K+fFc-bU+}I zm%0c8XD4XHCoeoxlI5Ac%V;RdM_~)d`&D8SVx&>BSs-5~6Q^@y@Hc0qqcqf?>&bNMcE3se)~;RLJ2>h?o4ZNFTE%hVeb zy9JgWXxUwT;Qn(mabx zD5f&5ah1s_Yx?z-m_Pxv>v5M3y;e}ui3^apBE%rRX}EbkNTWeifC>BIvh8<8yG7fY z){G@3RWRsO+IlK7N1gD5WTC857vnRaf<)Cfp#jmkfQH#-0(*scQ#b8BKb&bIJ+TK^ zdV`ycvon60hVI*|qxOcco<@;8@86uN23X8u`Bh%Kp=nfP;lHSnijIL4R`S5`X+EHJ zKbz(t0pF6nQt2Qnl5e?AKz+B&o{9p0-|}-Qhca^_Bu!eSPkNz$fGWjGmF%1#eq)Yx z#cs>>C`hpuOLtRFntoe53Y8>F*wTZ0*K6s1zdy&Tv%cK2 zHV@_TCDkhUSp&(pDk#wjfauCge&N@qaB5cC_xQfYreVSu{_qF+tA_}_5kSh-peY8> zty@She44ivByw_#h+zvw3f>eO=E&CiD;=JtXQwHZq>kX4@TA^b!Vv6oF#@8Sy^;CVJA?*dQS*vLp=#c}T_T7J$?RV*&pMTxD^Iz|8ckXuH zt-YrW*F!U85k-t!frBh^dNO91B#dAHf^wbvU>BRO$fdhgo#pg%|Q_!97MdPD;Kx zb}q|_>$W3HF`}24AyXBqbY&usFTEINEX7XAN0sdE-E^G8Iy+iQQyQhR%U9HpYFI@r zWJE%KR+FPXnrWqjvigf(k;lCjXhP zMuq)8=e1|j?|FnF{PZHk&xd2pY9bcl&(e~xXFB`_Npx1H^2iwQ2GFvkx z1NuB`w_1Af?;160vIG;TKj)nGvD3uM-yCCa@%*u#q6&mK@MLK%riZvDBAWD75>2yFf3I(Kk zL`dTakE8AqtYtoN1r?_^q|*K7G-ImnH2Rk+44FX?i6sptD~#Bp$K7?1icifMvs?q5 z(#=-TfQ(QzQQTD`;)eySPdO>ZjyO=VOdFGpSodo8WU3ESa@?dd3>$QF6m6sC3s9s* zQIP?MYg94P6fDy+x{mHUUE`i7jU{6P&br_kR_M30B66y0r5k~q&B=AAN6>s4;u`g? 
zkv@;;F^JW)j|btK2)miMV<6iX$7A;57-~hM_m9=778@N>ikE_Hj!V$KS7p5PR8a}L zb}$F5Cz`rKBYKc&{aCnlbrIijl5oTU6KnYkKg+h4k0qjQ3)V#??}*H% zsvlKu_jpx2)%qSr2geW#QB}AGSNm%g=}pJ00#Rj>0%W=3 zUNKMCMW4g(k1nD25#}E(!cx7DwI2`L{0(+urNQWC!^wa{@;30d<80txpKBi4-|GSA z7cl^cQM~GYia%WQgs%#3p0I^yulnY;aH@WyEy16ju7H@WHa1AF#ozBgD-VnIunE9dR-B zd~c9vo!7)&;$Lh`dD%;fxMAg|MM9|=ghu^fJR)c5)s{bR7>!HZwj2+OgLK$UxDBdB zI2j|EQ+~O|Z}t0G`I5kIbwv;Y!FipuRX5JnZ$}>_=*1Bp51J<6Pc=|-=!Twzq0u)C z`IJdg+reNV$AcPxLBK=HOF=65g<3oKr{}v4_=V}~0M-(+*IK8f^NmCtoTDq54Wx@a zNgy!7dH9U)Qp|=XV~LxwT}-}cHVFXGHhe<_1*N`aMHwVkSo*ZAFoT4JVT}S9UUvs$ za#R|}5Mgpm$jerz?H$1LVGcms_qt{g-?<3|E`pUo3BewdMFbSff3j`#{fC`+#@ge1 z9oamwUH&-(m0Wq(d{f;t(HSZiql04j>Hz&Gp%ut$nPX#H)da#b&n->eRMU_CE|tL* z0GJ;Cf-!t!(96Npq~>o3O!9k$Ynb!CaY#{zfabq(coqT0aI^}G(mjCGD=~NjwcW54 zy82pw`HQIu00CjY5iHii=QUS~k1rDoB~IZh&;axwIGgR80x2l;x)?GIqx^3qbC3aZ zJY*rsRn$Rw$pE2$laft6G4<}<#AQs`E1;L64;CuP{Rgd<>xTEbR!Q&`$I)0P;TGTT zAqFGxH`y2j@hQhCMNIBe>M_Pqwxo%NX>n7j{gPxzeL;lXK6RDw;Nw1dPo4- z2pLoXiMTX1^jYd^_Dfq$Fs<*XnyWgA(dH{O$?|bhC0@(^;sAq#_oo~(qtD(D%RV{q zM!!4srm@*t;_;Vlwy!6gX!E`Cl2g-Vo4BrFNki?VIp4-MnkVg#U$D(jn6XRMe{Jg+ z=e(-Kx0>k34J43Bahgq=O=U#nzH8hVwhue#%J8weGQ^u4^mkR1GxlXjj*H%~KfpK6 zks$`(&9dYLJ0|SSJdg=!7^>BWik7?M3(0k)%mtp|h;W__vp?|wWPbs^$ zsqpgaGhq!hWSZzE@BC=sbh(ZOAR^(A#3 zu>A`LWI;rE&3LlxV)X(9izrOYvje9jd9OFF!avMhfB=G$^bMxlJ(lx(69NDs5F=6X zj~1W7&qB1gjL>}PGcX9yRy?LZ=x^x%|5|;kZ}qLd)wlXq-|Aa^t8ewKzSXz-R^RGd zeXDQvt-jT_`c~iSTYal<^{u|uxB6D!>RWxQZ}qLd)wlY7oA3VvIoN2L0N4Zo=-MF8 literal 0 HcmV?d00001 From 3d4c941ba8cf16942ac76b22b7f4a853dd70f732 Mon Sep 17 00:00:00 2001 From: Joram Wilander Date: Tue, 17 Sep 2019 15:13:17 -0400 Subject: [PATCH 49/53] MM-18512 Use options struct for GetProfilesWithoutTeam and add filtering to API (#12200) * Use options struct for GetProfilesWithoutTeam and add filtering * Fix test --- api4/user.go | 2 +- app/user.go | 8 +- app/user_viewmembers_test.go | 2 +- store/sqlstore/user_store.go | 13 +- store/store.go | 2 +- .../mocks/LayeredStoreDatabaseLayer.go | 404 
+----------------- store/storetest/mocks/LayeredStoreSupplier.go | 404 +----------------- store/storetest/mocks/SqlSupplier.go | 6 +- store/storetest/mocks/UserStore.go | 14 +- store/storetest/user_store.go | 26 +- store/timer_layer.go | 89 +++- 11 files changed, 137 insertions(+), 833 deletions(-) diff --git a/api4/user.go b/api4/user.go index 927971c4f2..bfdd7baad8 100644 --- a/api4/user.go +++ b/api4/user.go @@ -557,7 +557,7 @@ func getUsers(c *Context, w http.ResponseWriter, r *http.Request) { return } - profiles, err = c.App.GetUsersWithoutTeamPage(c.Params.Page, c.Params.PerPage, c.IsSystemAdmin(), restrictions) + profiles, err = c.App.GetUsersWithoutTeamPage(userGetOptions, c.IsSystemAdmin()) } else if len(notInChannelId) > 0 { if !c.App.SessionHasPermissionToChannel(c.App.Session, notInChannelId, model.PERMISSION_READ_CHANNEL) { c.SetPermissionError(model.PERMISSION_READ_CHANNEL) diff --git a/app/user.go b/app/user.go index bc5da8a814..da2e016c21 100644 --- a/app/user.go +++ b/app/user.go @@ -589,8 +589,8 @@ func (a *App) GetUsersNotInChannelPage(teamId string, channelId string, groupCon return a.sanitizeProfiles(users, asAdmin), nil } -func (a *App) GetUsersWithoutTeamPage(page int, perPage int, asAdmin bool, viewRestrictions *model.ViewUsersRestrictions) ([]*model.User, *model.AppError) { - users, err := a.GetUsersWithoutTeam(page*perPage, perPage, viewRestrictions) +func (a *App) GetUsersWithoutTeamPage(options *model.UserGetOptions, asAdmin bool) ([]*model.User, *model.AppError) { + users, err := a.GetUsersWithoutTeam(options) if err != nil { return nil, err } @@ -598,8 +598,8 @@ func (a *App) GetUsersWithoutTeamPage(page int, perPage int, asAdmin bool, viewR return a.sanitizeProfiles(users, asAdmin), nil } -func (a *App) GetUsersWithoutTeam(offset int, limit int, viewRestrictions *model.ViewUsersRestrictions) ([]*model.User, *model.AppError) { - return a.Srv.Store.User().GetProfilesWithoutTeam(offset, limit, viewRestrictions) +func (a *App) 
GetUsersWithoutTeam(options *model.UserGetOptions) ([]*model.User, *model.AppError) { + return a.Srv.Store.User().GetProfilesWithoutTeam(options) } // GetTeamGroupUsers returns the users who are associated to the team via GroupTeams and GroupMembers. diff --git a/app/user_viewmembers_test.go b/app/user_viewmembers_test.go index 7a87b28177..2668bc14ac 100644 --- a/app/user_viewmembers_test.go +++ b/app/user_viewmembers_test.go @@ -618,7 +618,7 @@ func TestResctrictedViewMembers(t *testing.T) { for _, tc := range testCases { t.Run(tc.Name, func(t *testing.T) { - results, err := th.App.GetUsersWithoutTeam(0, 100, tc.Restrictions) + results, err := th.App.GetUsersWithoutTeam(&model.UserGetOptions{Page: 0, PerPage: 100, ViewRestrictions: tc.Restrictions}) require.Nil(t, err) ids := []string{} for _, result := range results { diff --git a/store/sqlstore/user_store.go b/store/sqlstore/user_store.go index 5a8fc3fe75..5febcc88e0 100644 --- a/store/sqlstore/user_store.go +++ b/store/sqlstore/user_store.go @@ -667,7 +667,8 @@ func (us SqlUserStore) GetProfilesNotInChannel(teamId string, channelId string, return users, nil } -func (us SqlUserStore) GetProfilesWithoutTeam(offset int, limit int, viewRestrictions *model.ViewUsersRestrictions) ([]*model.User, *model.AppError) { +func (us SqlUserStore) GetProfilesWithoutTeam(options *model.UserGetOptions) ([]*model.User, *model.AppError) { + isPostgreSQL := us.DriverName() == model.DATABASE_DRIVER_POSTGRES query := us.usersQuery. Where(`( SELECT @@ -679,9 +680,15 @@ func (us SqlUserStore) GetProfilesWithoutTeam(offset int, limit int, viewRestric AND TeamMembers.DeleteAt = 0 ) = 0`). OrderBy("u.Username ASC"). 
- Offset(uint64(offset)).Limit(uint64(limit)) + Offset(uint64(options.Page * options.PerPage)).Limit(uint64(options.PerPage)) - query = applyViewRestrictionsFilter(query, viewRestrictions, true) + query = applyViewRestrictionsFilter(query, options.ViewRestrictions, true) + + query = applyRoleFilter(query, options.Role, isPostgreSQL) + + if options.Inactive { + query = query.Where("u.DeleteAt != 0") + } queryString, args, err := query.ToSql() if err != nil { diff --git a/store/store.go b/store/store.go index cc96278eee..9ad3324a02 100644 --- a/store/store.go +++ b/store/store.go @@ -253,7 +253,7 @@ type UserStore interface { GetProfilesInChannelByStatus(channelId string, offset int, limit int) ([]*model.User, *model.AppError) GetAllProfilesInChannel(channelId string, allowFromCache bool) (map[string]*model.User, *model.AppError) GetProfilesNotInChannel(teamId string, channelId string, groupConstrained bool, offset int, limit int, viewRestrictions *model.ViewUsersRestrictions) ([]*model.User, *model.AppError) - GetProfilesWithoutTeam(offset int, limit int, viewRestrictions *model.ViewUsersRestrictions) ([]*model.User, *model.AppError) + GetProfilesWithoutTeam(options *model.UserGetOptions) ([]*model.User, *model.AppError) GetProfilesByUsernames(usernames []string, viewRestrictions *model.ViewUsersRestrictions) ([]*model.User, *model.AppError) GetAllProfiles(options *model.UserGetOptions) ([]*model.User, *model.AppError) GetProfiles(options *model.UserGetOptions) ([]*model.User, *model.AppError) diff --git a/store/storetest/mocks/LayeredStoreDatabaseLayer.go b/store/storetest/mocks/LayeredStoreDatabaseLayer.go index cb0ab438f5..6414b80649 100644 --- a/store/storetest/mocks/LayeredStoreDatabaseLayer.go +++ b/store/storetest/mocks/LayeredStoreDatabaseLayer.go @@ -5,12 +5,8 @@ package mocks import ( - context "context" - - model "github.com/mattermost/mattermost-server/model" - mock "github.com/stretchr/testify/mock" - store 
"github.com/mattermost/mattermost-server/store" + mock "github.com/stretchr/testify/mock" ) // LayeredStoreDatabaseLayer is an autogenerated mock type for the LayeredStoreDatabaseLayer type @@ -404,221 +400,6 @@ func (_m *LayeredStoreDatabaseLayer) Role() store.RoleStore { return r0 } -// RoleDelete provides a mock function with given fields: ctx, roldId, hints -func (_m *LayeredStoreDatabaseLayer) RoleDelete(ctx context.Context, roldId string, hints ...store.LayeredStoreHint) (*model.Role, *model.AppError) { - _va := make([]interface{}, len(hints)) - for _i := range hints { - _va[_i] = hints[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, roldId) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *model.Role - if rf, ok := ret.Get(0).(func(context.Context, string, ...store.LayeredStoreHint) *model.Role); ok { - r0 = rf(ctx, roldId, hints...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*model.Role) - } - } - - var r1 *model.AppError - if rf, ok := ret.Get(1).(func(context.Context, string, ...store.LayeredStoreHint) *model.AppError); ok { - r1 = rf(ctx, roldId, hints...) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*model.AppError) - } - } - - return r0, r1 -} - -// RoleGet provides a mock function with given fields: ctx, roleId, hints -func (_m *LayeredStoreDatabaseLayer) RoleGet(ctx context.Context, roleId string, hints ...store.LayeredStoreHint) (*model.Role, *model.AppError) { - _va := make([]interface{}, len(hints)) - for _i := range hints { - _va[_i] = hints[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, roleId) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *model.Role - if rf, ok := ret.Get(0).(func(context.Context, string, ...store.LayeredStoreHint) *model.Role); ok { - r0 = rf(ctx, roleId, hints...) 
- } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*model.Role) - } - } - - var r1 *model.AppError - if rf, ok := ret.Get(1).(func(context.Context, string, ...store.LayeredStoreHint) *model.AppError); ok { - r1 = rf(ctx, roleId, hints...) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*model.AppError) - } - } - - return r0, r1 -} - -// RoleGetAll provides a mock function with given fields: ctx, hints -func (_m *LayeredStoreDatabaseLayer) RoleGetAll(ctx context.Context, hints ...store.LayeredStoreHint) ([]*model.Role, *model.AppError) { - _va := make([]interface{}, len(hints)) - for _i := range hints { - _va[_i] = hints[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 []*model.Role - if rf, ok := ret.Get(0).(func(context.Context, ...store.LayeredStoreHint) []*model.Role); ok { - r0 = rf(ctx, hints...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*model.Role) - } - } - - var r1 *model.AppError - if rf, ok := ret.Get(1).(func(context.Context, ...store.LayeredStoreHint) *model.AppError); ok { - r1 = rf(ctx, hints...) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*model.AppError) - } - } - - return r0, r1 -} - -// RoleGetByName provides a mock function with given fields: ctx, name, hints -func (_m *LayeredStoreDatabaseLayer) RoleGetByName(ctx context.Context, name string, hints ...store.LayeredStoreHint) (*model.Role, *model.AppError) { - _va := make([]interface{}, len(hints)) - for _i := range hints { - _va[_i] = hints[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, name) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *model.Role - if rf, ok := ret.Get(0).(func(context.Context, string, ...store.LayeredStoreHint) *model.Role); ok { - r0 = rf(ctx, name, hints...) 
- } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*model.Role) - } - } - - var r1 *model.AppError - if rf, ok := ret.Get(1).(func(context.Context, string, ...store.LayeredStoreHint) *model.AppError); ok { - r1 = rf(ctx, name, hints...) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*model.AppError) - } - } - - return r0, r1 -} - -// RoleGetByNames provides a mock function with given fields: ctx, names, hints -func (_m *LayeredStoreDatabaseLayer) RoleGetByNames(ctx context.Context, names []string, hints ...store.LayeredStoreHint) ([]*model.Role, *model.AppError) { - _va := make([]interface{}, len(hints)) - for _i := range hints { - _va[_i] = hints[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, names) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 []*model.Role - if rf, ok := ret.Get(0).(func(context.Context, []string, ...store.LayeredStoreHint) []*model.Role); ok { - r0 = rf(ctx, names, hints...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*model.Role) - } - } - - var r1 *model.AppError - if rf, ok := ret.Get(1).(func(context.Context, []string, ...store.LayeredStoreHint) *model.AppError); ok { - r1 = rf(ctx, names, hints...) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*model.AppError) - } - } - - return r0, r1 -} - -// RolePermanentDeleteAll provides a mock function with given fields: ctx, hints -func (_m *LayeredStoreDatabaseLayer) RolePermanentDeleteAll(ctx context.Context, hints ...store.LayeredStoreHint) *model.AppError { - _va := make([]interface{}, len(hints)) - for _i := range hints { - _va[_i] = hints[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *model.AppError - if rf, ok := ret.Get(0).(func(context.Context, ...store.LayeredStoreHint) *model.AppError); ok { - r0 = rf(ctx, hints...) 
- } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*model.AppError) - } - } - - return r0 -} - -// RoleSave provides a mock function with given fields: ctx, role, hints -func (_m *LayeredStoreDatabaseLayer) RoleSave(ctx context.Context, role *model.Role, hints ...store.LayeredStoreHint) (*model.Role, *model.AppError) { - _va := make([]interface{}, len(hints)) - for _i := range hints { - _va[_i] = hints[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, role) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *model.Role - if rf, ok := ret.Get(0).(func(context.Context, *model.Role, ...store.LayeredStoreHint) *model.Role); ok { - r0 = rf(ctx, role, hints...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*model.Role) - } - } - - var r1 *model.AppError - if rf, ok := ret.Get(1).(func(context.Context, *model.Role, ...store.LayeredStoreHint) *model.AppError); ok { - r1 = rf(ctx, role, hints...) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*model.AppError) - } - } - - return r0, r1 -} - // Scheme provides a mock function with given fields: func (_m *LayeredStoreDatabaseLayer) Scheme() store.SchemeStore { ret := _m.Called() @@ -635,189 +416,6 @@ func (_m *LayeredStoreDatabaseLayer) Scheme() store.SchemeStore { return r0 } -// SchemeDelete provides a mock function with given fields: ctx, schemeId, hints -func (_m *LayeredStoreDatabaseLayer) SchemeDelete(ctx context.Context, schemeId string, hints ...store.LayeredStoreHint) (*model.Scheme, *model.AppError) { - _va := make([]interface{}, len(hints)) - for _i := range hints { - _va[_i] = hints[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, schemeId) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *model.Scheme - if rf, ok := ret.Get(0).(func(context.Context, string, ...store.LayeredStoreHint) *model.Scheme); ok { - r0 = rf(ctx, schemeId, hints...) 
- } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*model.Scheme) - } - } - - var r1 *model.AppError - if rf, ok := ret.Get(1).(func(context.Context, string, ...store.LayeredStoreHint) *model.AppError); ok { - r1 = rf(ctx, schemeId, hints...) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*model.AppError) - } - } - - return r0, r1 -} - -// SchemeGet provides a mock function with given fields: ctx, schemeId, hints -func (_m *LayeredStoreDatabaseLayer) SchemeGet(ctx context.Context, schemeId string, hints ...store.LayeredStoreHint) (*model.Scheme, *model.AppError) { - _va := make([]interface{}, len(hints)) - for _i := range hints { - _va[_i] = hints[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, schemeId) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *model.Scheme - if rf, ok := ret.Get(0).(func(context.Context, string, ...store.LayeredStoreHint) *model.Scheme); ok { - r0 = rf(ctx, schemeId, hints...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*model.Scheme) - } - } - - var r1 *model.AppError - if rf, ok := ret.Get(1).(func(context.Context, string, ...store.LayeredStoreHint) *model.AppError); ok { - r1 = rf(ctx, schemeId, hints...) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*model.AppError) - } - } - - return r0, r1 -} - -// SchemeGetAllPage provides a mock function with given fields: ctx, scope, offset, limit, hints -func (_m *LayeredStoreDatabaseLayer) SchemeGetAllPage(ctx context.Context, scope string, offset int, limit int, hints ...store.LayeredStoreHint) ([]*model.Scheme, *model.AppError) { - _va := make([]interface{}, len(hints)) - for _i := range hints { - _va[_i] = hints[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, scope, offset, limit) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 []*model.Scheme - if rf, ok := ret.Get(0).(func(context.Context, string, int, int, ...store.LayeredStoreHint) []*model.Scheme); ok { - r0 = rf(ctx, scope, offset, limit, hints...) 
- } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*model.Scheme) - } - } - - var r1 *model.AppError - if rf, ok := ret.Get(1).(func(context.Context, string, int, int, ...store.LayeredStoreHint) *model.AppError); ok { - r1 = rf(ctx, scope, offset, limit, hints...) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*model.AppError) - } - } - - return r0, r1 -} - -// SchemeGetByName provides a mock function with given fields: ctx, schemeName, hints -func (_m *LayeredStoreDatabaseLayer) SchemeGetByName(ctx context.Context, schemeName string, hints ...store.LayeredStoreHint) (*model.Scheme, *model.AppError) { - _va := make([]interface{}, len(hints)) - for _i := range hints { - _va[_i] = hints[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, schemeName) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *model.Scheme - if rf, ok := ret.Get(0).(func(context.Context, string, ...store.LayeredStoreHint) *model.Scheme); ok { - r0 = rf(ctx, schemeName, hints...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*model.Scheme) - } - } - - var r1 *model.AppError - if rf, ok := ret.Get(1).(func(context.Context, string, ...store.LayeredStoreHint) *model.AppError); ok { - r1 = rf(ctx, schemeName, hints...) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*model.AppError) - } - } - - return r0, r1 -} - -// SchemePermanentDeleteAll provides a mock function with given fields: ctx, hints -func (_m *LayeredStoreDatabaseLayer) SchemePermanentDeleteAll(ctx context.Context, hints ...store.LayeredStoreHint) *model.AppError { - _va := make([]interface{}, len(hints)) - for _i := range hints { - _va[_i] = hints[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *model.AppError - if rf, ok := ret.Get(0).(func(context.Context, ...store.LayeredStoreHint) *model.AppError); ok { - r0 = rf(ctx, hints...) 
- } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*model.AppError) - } - } - - return r0 -} - -// SchemeSave provides a mock function with given fields: ctx, scheme, hints -func (_m *LayeredStoreDatabaseLayer) SchemeSave(ctx context.Context, scheme *model.Scheme, hints ...store.LayeredStoreHint) (*model.Scheme, *model.AppError) { - _va := make([]interface{}, len(hints)) - for _i := range hints { - _va[_i] = hints[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, scheme) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *model.Scheme - if rf, ok := ret.Get(0).(func(context.Context, *model.Scheme, ...store.LayeredStoreHint) *model.Scheme); ok { - r0 = rf(ctx, scheme, hints...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*model.Scheme) - } - } - - var r1 *model.AppError - if rf, ok := ret.Get(1).(func(context.Context, *model.Scheme, ...store.LayeredStoreHint) *model.AppError); ok { - r1 = rf(ctx, scheme, hints...) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*model.AppError) - } - } - - return r0, r1 -} - // Session provides a mock function with given fields: func (_m *LayeredStoreDatabaseLayer) Session() store.SessionStore { ret := _m.Called() diff --git a/store/storetest/mocks/LayeredStoreSupplier.go b/store/storetest/mocks/LayeredStoreSupplier.go index 6d4e655e22..18f9642bf8 100644 --- a/store/storetest/mocks/LayeredStoreSupplier.go +++ b/store/storetest/mocks/LayeredStoreSupplier.go @@ -5,12 +5,8 @@ package mocks import ( - context "context" - - model "github.com/mattermost/mattermost-server/model" - mock "github.com/stretchr/testify/mock" - store "github.com/mattermost/mattermost-server/store" + mock "github.com/stretchr/testify/mock" ) // LayeredStoreSupplier is an autogenerated mock type for the LayeredStoreSupplier type @@ -34,404 +30,6 @@ func (_m *LayeredStoreSupplier) Next() store.LayeredStoreSupplier { return r0 } -// RoleDelete provides a mock function with given fields: ctx, roldId, hints -func (_m 
*LayeredStoreSupplier) RoleDelete(ctx context.Context, roldId string, hints ...store.LayeredStoreHint) (*model.Role, *model.AppError) { - _va := make([]interface{}, len(hints)) - for _i := range hints { - _va[_i] = hints[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, roldId) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *model.Role - if rf, ok := ret.Get(0).(func(context.Context, string, ...store.LayeredStoreHint) *model.Role); ok { - r0 = rf(ctx, roldId, hints...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*model.Role) - } - } - - var r1 *model.AppError - if rf, ok := ret.Get(1).(func(context.Context, string, ...store.LayeredStoreHint) *model.AppError); ok { - r1 = rf(ctx, roldId, hints...) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*model.AppError) - } - } - - return r0, r1 -} - -// RoleGet provides a mock function with given fields: ctx, roleId, hints -func (_m *LayeredStoreSupplier) RoleGet(ctx context.Context, roleId string, hints ...store.LayeredStoreHint) (*model.Role, *model.AppError) { - _va := make([]interface{}, len(hints)) - for _i := range hints { - _va[_i] = hints[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, roleId) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *model.Role - if rf, ok := ret.Get(0).(func(context.Context, string, ...store.LayeredStoreHint) *model.Role); ok { - r0 = rf(ctx, roleId, hints...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*model.Role) - } - } - - var r1 *model.AppError - if rf, ok := ret.Get(1).(func(context.Context, string, ...store.LayeredStoreHint) *model.AppError); ok { - r1 = rf(ctx, roleId, hints...) 
- } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*model.AppError) - } - } - - return r0, r1 -} - -// RoleGetAll provides a mock function with given fields: ctx, hints -func (_m *LayeredStoreSupplier) RoleGetAll(ctx context.Context, hints ...store.LayeredStoreHint) ([]*model.Role, *model.AppError) { - _va := make([]interface{}, len(hints)) - for _i := range hints { - _va[_i] = hints[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 []*model.Role - if rf, ok := ret.Get(0).(func(context.Context, ...store.LayeredStoreHint) []*model.Role); ok { - r0 = rf(ctx, hints...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*model.Role) - } - } - - var r1 *model.AppError - if rf, ok := ret.Get(1).(func(context.Context, ...store.LayeredStoreHint) *model.AppError); ok { - r1 = rf(ctx, hints...) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*model.AppError) - } - } - - return r0, r1 -} - -// RoleGetByName provides a mock function with given fields: ctx, name, hints -func (_m *LayeredStoreSupplier) RoleGetByName(ctx context.Context, name string, hints ...store.LayeredStoreHint) (*model.Role, *model.AppError) { - _va := make([]interface{}, len(hints)) - for _i := range hints { - _va[_i] = hints[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, name) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *model.Role - if rf, ok := ret.Get(0).(func(context.Context, string, ...store.LayeredStoreHint) *model.Role); ok { - r0 = rf(ctx, name, hints...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*model.Role) - } - } - - var r1 *model.AppError - if rf, ok := ret.Get(1).(func(context.Context, string, ...store.LayeredStoreHint) *model.AppError); ok { - r1 = rf(ctx, name, hints...) 
- } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*model.AppError) - } - } - - return r0, r1 -} - -// RoleGetByNames provides a mock function with given fields: ctx, names, hints -func (_m *LayeredStoreSupplier) RoleGetByNames(ctx context.Context, names []string, hints ...store.LayeredStoreHint) ([]*model.Role, *model.AppError) { - _va := make([]interface{}, len(hints)) - for _i := range hints { - _va[_i] = hints[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, names) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 []*model.Role - if rf, ok := ret.Get(0).(func(context.Context, []string, ...store.LayeredStoreHint) []*model.Role); ok { - r0 = rf(ctx, names, hints...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*model.Role) - } - } - - var r1 *model.AppError - if rf, ok := ret.Get(1).(func(context.Context, []string, ...store.LayeredStoreHint) *model.AppError); ok { - r1 = rf(ctx, names, hints...) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*model.AppError) - } - } - - return r0, r1 -} - -// RolePermanentDeleteAll provides a mock function with given fields: ctx, hints -func (_m *LayeredStoreSupplier) RolePermanentDeleteAll(ctx context.Context, hints ...store.LayeredStoreHint) *model.AppError { - _va := make([]interface{}, len(hints)) - for _i := range hints { - _va[_i] = hints[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *model.AppError - if rf, ok := ret.Get(0).(func(context.Context, ...store.LayeredStoreHint) *model.AppError); ok { - r0 = rf(ctx, hints...) 
- } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*model.AppError) - } - } - - return r0 -} - -// RoleSave provides a mock function with given fields: ctx, role, hints -func (_m *LayeredStoreSupplier) RoleSave(ctx context.Context, role *model.Role, hints ...store.LayeredStoreHint) (*model.Role, *model.AppError) { - _va := make([]interface{}, len(hints)) - for _i := range hints { - _va[_i] = hints[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, role) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *model.Role - if rf, ok := ret.Get(0).(func(context.Context, *model.Role, ...store.LayeredStoreHint) *model.Role); ok { - r0 = rf(ctx, role, hints...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*model.Role) - } - } - - var r1 *model.AppError - if rf, ok := ret.Get(1).(func(context.Context, *model.Role, ...store.LayeredStoreHint) *model.AppError); ok { - r1 = rf(ctx, role, hints...) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*model.AppError) - } - } - - return r0, r1 -} - -// SchemeDelete provides a mock function with given fields: ctx, schemeId, hints -func (_m *LayeredStoreSupplier) SchemeDelete(ctx context.Context, schemeId string, hints ...store.LayeredStoreHint) (*model.Scheme, *model.AppError) { - _va := make([]interface{}, len(hints)) - for _i := range hints { - _va[_i] = hints[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, schemeId) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *model.Scheme - if rf, ok := ret.Get(0).(func(context.Context, string, ...store.LayeredStoreHint) *model.Scheme); ok { - r0 = rf(ctx, schemeId, hints...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*model.Scheme) - } - } - - var r1 *model.AppError - if rf, ok := ret.Get(1).(func(context.Context, string, ...store.LayeredStoreHint) *model.AppError); ok { - r1 = rf(ctx, schemeId, hints...) 
- } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*model.AppError) - } - } - - return r0, r1 -} - -// SchemeGet provides a mock function with given fields: ctx, schemeId, hints -func (_m *LayeredStoreSupplier) SchemeGet(ctx context.Context, schemeId string, hints ...store.LayeredStoreHint) (*model.Scheme, *model.AppError) { - _va := make([]interface{}, len(hints)) - for _i := range hints { - _va[_i] = hints[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, schemeId) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *model.Scheme - if rf, ok := ret.Get(0).(func(context.Context, string, ...store.LayeredStoreHint) *model.Scheme); ok { - r0 = rf(ctx, schemeId, hints...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*model.Scheme) - } - } - - var r1 *model.AppError - if rf, ok := ret.Get(1).(func(context.Context, string, ...store.LayeredStoreHint) *model.AppError); ok { - r1 = rf(ctx, schemeId, hints...) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*model.AppError) - } - } - - return r0, r1 -} - -// SchemeGetAllPage provides a mock function with given fields: ctx, scope, offset, limit, hints -func (_m *LayeredStoreSupplier) SchemeGetAllPage(ctx context.Context, scope string, offset int, limit int, hints ...store.LayeredStoreHint) ([]*model.Scheme, *model.AppError) { - _va := make([]interface{}, len(hints)) - for _i := range hints { - _va[_i] = hints[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, scope, offset, limit) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 []*model.Scheme - if rf, ok := ret.Get(0).(func(context.Context, string, int, int, ...store.LayeredStoreHint) []*model.Scheme); ok { - r0 = rf(ctx, scope, offset, limit, hints...) 
- } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*model.Scheme) - } - } - - var r1 *model.AppError - if rf, ok := ret.Get(1).(func(context.Context, string, int, int, ...store.LayeredStoreHint) *model.AppError); ok { - r1 = rf(ctx, scope, offset, limit, hints...) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*model.AppError) - } - } - - return r0, r1 -} - -// SchemeGetByName provides a mock function with given fields: ctx, schemeName, hints -func (_m *LayeredStoreSupplier) SchemeGetByName(ctx context.Context, schemeName string, hints ...store.LayeredStoreHint) (*model.Scheme, *model.AppError) { - _va := make([]interface{}, len(hints)) - for _i := range hints { - _va[_i] = hints[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, schemeName) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *model.Scheme - if rf, ok := ret.Get(0).(func(context.Context, string, ...store.LayeredStoreHint) *model.Scheme); ok { - r0 = rf(ctx, schemeName, hints...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*model.Scheme) - } - } - - var r1 *model.AppError - if rf, ok := ret.Get(1).(func(context.Context, string, ...store.LayeredStoreHint) *model.AppError); ok { - r1 = rf(ctx, schemeName, hints...) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*model.AppError) - } - } - - return r0, r1 -} - -// SchemePermanentDeleteAll provides a mock function with given fields: ctx, hints -func (_m *LayeredStoreSupplier) SchemePermanentDeleteAll(ctx context.Context, hints ...store.LayeredStoreHint) *model.AppError { - _va := make([]interface{}, len(hints)) - for _i := range hints { - _va[_i] = hints[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *model.AppError - if rf, ok := ret.Get(0).(func(context.Context, ...store.LayeredStoreHint) *model.AppError); ok { - r0 = rf(ctx, hints...) 
- } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*model.AppError) - } - } - - return r0 -} - -// SchemeSave provides a mock function with given fields: ctx, scheme, hints -func (_m *LayeredStoreSupplier) SchemeSave(ctx context.Context, scheme *model.Scheme, hints ...store.LayeredStoreHint) (*model.Scheme, *model.AppError) { - _va := make([]interface{}, len(hints)) - for _i := range hints { - _va[_i] = hints[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, scheme) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *model.Scheme - if rf, ok := ret.Get(0).(func(context.Context, *model.Scheme, ...store.LayeredStoreHint) *model.Scheme); ok { - r0 = rf(ctx, scheme, hints...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*model.Scheme) - } - } - - var r1 *model.AppError - if rf, ok := ret.Get(1).(func(context.Context, *model.Scheme, ...store.LayeredStoreHint) *model.AppError); ok { - r1 = rf(ctx, scheme, hints...) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).(*model.AppError) - } - } - - return r0, r1 -} - // SetChainNext provides a mock function with given fields: _a0 func (_m *LayeredStoreSupplier) SetChainNext(_a0 store.LayeredStoreSupplier) { _m.Called(_a0) diff --git a/store/storetest/mocks/SqlSupplier.go b/store/storetest/mocks/SqlSupplier.go index 4a844524d3..2b4f7c746f 100644 --- a/store/storetest/mocks/SqlSupplier.go +++ b/store/storetest/mocks/SqlSupplier.go @@ -4,8 +4,10 @@ package mocks -import gorp "github.com/mattermost/gorp" -import mock "github.com/stretchr/testify/mock" +import ( + gorp "github.com/mattermost/gorp" + mock "github.com/stretchr/testify/mock" +) // SqlSupplier is an autogenerated mock type for the SqlSupplier type type SqlSupplier struct { diff --git a/store/storetest/mocks/UserStore.go b/store/storetest/mocks/UserStore.go index e901c81d70..43931cd758 100644 --- a/store/storetest/mocks/UserStore.go +++ b/store/storetest/mocks/UserStore.go @@ -709,13 +709,13 @@ func (_m *UserStore) 
GetProfilesNotInTeam(teamId string, groupConstrained bool, return r0, r1 } -// GetProfilesWithoutTeam provides a mock function with given fields: offset, limit, viewRestrictions -func (_m *UserStore) GetProfilesWithoutTeam(offset int, limit int, viewRestrictions *model.ViewUsersRestrictions) ([]*model.User, *model.AppError) { - ret := _m.Called(offset, limit, viewRestrictions) +// GetProfilesWithoutTeam provides a mock function with given fields: options +func (_m *UserStore) GetProfilesWithoutTeam(options *model.UserGetOptions) ([]*model.User, *model.AppError) { + ret := _m.Called(options) var r0 []*model.User - if rf, ok := ret.Get(0).(func(int, int, *model.ViewUsersRestrictions) []*model.User); ok { - r0 = rf(offset, limit, viewRestrictions) + if rf, ok := ret.Get(0).(func(*model.UserGetOptions) []*model.User); ok { + r0 = rf(options) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]*model.User) @@ -723,8 +723,8 @@ func (_m *UserStore) GetProfilesWithoutTeam(offset int, limit int, viewRestricti } var r1 *model.AppError - if rf, ok := ret.Get(1).(func(int, int, *model.ViewUsersRestrictions) *model.AppError); ok { - r1 = rf(offset, limit, viewRestrictions) + if rf, ok := ret.Get(1).(func(*model.UserGetOptions) *model.AppError); ok { + r1 = rf(options) } else { if ret.Get(1) != nil { r1 = ret.Get(1).(*model.AppError) diff --git a/store/storetest/user_store.go b/store/storetest/user_store.go index 58eed620ff..b538248e67 100644 --- a/store/storetest/user_store.go +++ b/store/storetest/user_store.go @@ -919,6 +919,8 @@ func testUserStoreGetProfilesWithoutTeam(t *testing.T, ss store.Store) { u3, err := ss.User().Save(&model.User{ Email: MakeEmail(), Username: "u3" + model.NewId(), + DeleteAt: 1, + Roles: "system_admin", }) require.Nil(t, err) defer func() { require.Nil(t, ss.User().PermanentDelete(u3.Id)) }() @@ -931,23 +933,35 @@ func testUserStoreGetProfilesWithoutTeam(t *testing.T, ss store.Store) { u3.IsBot = true defer func() { require.Nil(t, 
ss.Bot().PermanentDelete(u3.Id)) }() - t.Run("get, offset 0, limit 100", func(t *testing.T) { - users, err := ss.User().GetProfilesWithoutTeam(0, 100, nil) + t.Run("get, page 0, per_page 100", func(t *testing.T) { + users, err := ss.User().GetProfilesWithoutTeam(&model.UserGetOptions{Page: 0, PerPage: 100}) require.Nil(t, err) assert.Equal(t, []*model.User{sanitized(u2), sanitized(u3)}, users) }) - t.Run("get, offset 1, limit 1", func(t *testing.T) { - users, err := ss.User().GetProfilesWithoutTeam(1, 1, nil) + t.Run("get, page 1, per_page 1", func(t *testing.T) { + users, err := ss.User().GetProfilesWithoutTeam(&model.UserGetOptions{Page: 1, PerPage: 1}) require.Nil(t, err) assert.Equal(t, []*model.User{sanitized(u3)}, users) }) - t.Run("get, offset 2, limit 1", func(t *testing.T) { - users, err := ss.User().GetProfilesWithoutTeam(2, 1, nil) + t.Run("get, page 2, per_page 1", func(t *testing.T) { + users, err := ss.User().GetProfilesWithoutTeam(&model.UserGetOptions{Page: 2, PerPage: 1}) require.Nil(t, err) assert.Equal(t, []*model.User{}, users) }) + + t.Run("get, page 0, per_page 100, inactive", func(t *testing.T) { + users, err := ss.User().GetProfilesWithoutTeam(&model.UserGetOptions{Page: 0, PerPage: 100, Inactive: true}) + require.Nil(t, err) + assert.Equal(t, []*model.User{sanitized(u3)}, users) + }) + + t.Run("get, page 0, per_page 100, role", func(t *testing.T) { + users, err := ss.User().GetProfilesWithoutTeam(&model.UserGetOptions{Page: 0, PerPage: 100, Role: "system_admin"}) + require.Nil(t, err) + assert.Equal(t, []*model.User{sanitized(u3)}, users) + }) } func testUserStoreGetAllProfilesInChannel(t *testing.T, ss store.Store) { diff --git a/store/timer_layer.go b/store/timer_layer.go index 577852408e..5bdbc5c00b 100644 --- a/store/timer_layer.go +++ b/store/timer_layer.go @@ -1195,6 +1195,40 @@ func (s *TimerLayerChannelStore) GetMoreChannels(teamId string, userId string, o return resultVar0, resultVar1 } +func (s *TimerLayerChannelStore) 
GetPinnedPostCount(channelId string, allowFromCache bool) (int64, *model.AppError) { + start := timemodule.Now() + + resultVar0, resultVar1 := s.ChannelStore.GetPinnedPostCount(channelId, allowFromCache) + + t := timemodule.Now() + elapsed := t.Sub(start) + if s.Root.Metrics != nil { + success := "false" + if resultVar1 == nil { + success = "true" + } + s.Root.Metrics.ObserveStoreMethodDuration("ChannelStore.GetPinnedPostCount", success, float64(elapsed)) + } + return resultVar0, resultVar1 +} + +func (s *TimerLayerChannelStore) GetPinnedPostCountFromCache(channelId string) int64 { + start := timemodule.Now() + + resultVar0 := s.ChannelStore.GetPinnedPostCountFromCache(channelId) + + t := timemodule.Now() + elapsed := t.Sub(start) + if s.Root.Metrics != nil { + success := "false" + if true { + success = "true" + } + s.Root.Metrics.ObserveStoreMethodDuration("ChannelStore.GetPinnedPostCountFromCache", success, float64(elapsed)) + } + return resultVar0 +} + func (s *TimerLayerChannelStore) GetPinnedPosts(channelId string) (*model.PostList, *model.AppError) { start := timemodule.Now() @@ -5360,6 +5394,40 @@ func (s *TimerLayerTeamStore) AnalyticsGetTeamCountForScheme(schemeId string) (i return resultVar0, resultVar1 } +func (s *TimerLayerTeamStore) AnalyticsPrivateTeamCount() (int64, *model.AppError) { + start := timemodule.Now() + + resultVar0, resultVar1 := s.TeamStore.AnalyticsPrivateTeamCount() + + t := timemodule.Now() + elapsed := t.Sub(start) + if s.Root.Metrics != nil { + success := "false" + if resultVar1 == nil { + success = "true" + } + s.Root.Metrics.ObserveStoreMethodDuration("TeamStore.AnalyticsPrivateTeamCount", success, float64(elapsed)) + } + return resultVar0, resultVar1 +} + +func (s *TimerLayerTeamStore) AnalyticsPublicTeamCount() (int64, *model.AppError) { + start := timemodule.Now() + + resultVar0, resultVar1 := s.TeamStore.AnalyticsPublicTeamCount() + + t := timemodule.Now() + elapsed := t.Sub(start) + if s.Root.Metrics != nil { + success := 
"false" + if resultVar1 == nil { + success = "true" + } + s.Root.Metrics.ObserveStoreMethodDuration("TeamStore.AnalyticsPublicTeamCount", success, float64(elapsed)) + } + return resultVar0, resultVar1 +} + func (s *TimerLayerTeamStore) AnalyticsTeamCount() (int64, *model.AppError) { start := timemodule.Now() @@ -5530,6 +5598,23 @@ func (s *TimerLayerTeamStore) GetAllPrivateTeamPageListing(offset int, limit int return resultVar0, resultVar1 } +func (s *TimerLayerTeamStore) GetAllPublicTeamPageListing(offset int, limit int) ([]*model.Team, *model.AppError) { + start := timemodule.Now() + + resultVar0, resultVar1 := s.TeamStore.GetAllPublicTeamPageListing(offset, limit) + + t := timemodule.Now() + elapsed := t.Sub(start) + if s.Root.Metrics != nil { + success := "false" + if resultVar1 == nil { + success = "true" + } + s.Root.Metrics.ObserveStoreMethodDuration("TeamStore.GetAllPublicTeamPageListing", success, float64(elapsed)) + } + return resultVar0, resultVar1 +} + func (s *TimerLayerTeamStore) GetAllTeamListing() ([]*model.Team, *model.AppError) { start := timemodule.Now() @@ -6737,10 +6822,10 @@ func (s *TimerLayerUserStore) GetProfilesNotInTeam(teamId string, groupConstrain return resultVar0, resultVar1 } -func (s *TimerLayerUserStore) GetProfilesWithoutTeam(offset int, limit int, viewRestrictions *model.ViewUsersRestrictions) ([]*model.User, *model.AppError) { +func (s *TimerLayerUserStore) GetProfilesWithoutTeam(options *model.UserGetOptions) ([]*model.User, *model.AppError) { start := timemodule.Now() - resultVar0, resultVar1 := s.UserStore.GetProfilesWithoutTeam(offset, limit, viewRestrictions) + resultVar0, resultVar1 := s.UserStore.GetProfilesWithoutTeam(options) t := timemodule.Now() elapsed := t.Sub(start) From 3a3676eb123baa9ac13f917419b2d4a6becbd566 Mon Sep 17 00:00:00 2001 From: Nikhil Ranjan Date: Tue, 17 Sep 2019 21:54:43 +0200 Subject: [PATCH 50/53] Converting to structured logging the file cmd/mattermost/commands/utils.go (#12091) --- 
cmd/mattermost/commands/utils.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/mattermost/commands/utils.go b/cmd/mattermost/commands/utils.go index 6de473c4a4..8267f813e7 100644 --- a/cmd/mattermost/commands/utils.go +++ b/cmd/mattermost/commands/utils.go @@ -22,7 +22,7 @@ func prettyPrintStruct(t interface{}) string { func structToMap(t interface{}) map[string]interface{} { defer func() { if r := recover(); r != nil { - mlog.Error(fmt.Sprintf("Panicked in structToMap. This should never happen. %v", r)) + mlog.Error("Panicked in structToMap. This should never happen.", mlog.Any("recover", r)) } }() From 3323e7a6195f17776cc0aad0cc7d622767118654 Mon Sep 17 00:00:00 2001 From: Ben Schumacher Date: Wed, 18 Sep 2019 18:23:58 +0200 Subject: [PATCH 51/53] [MM-17109] Bump model version to 5.16 (#12172) * Bump model version to 5.16 * Make db schema version indipended of app version --- model/version.go | 1 + store/sqlstore/upgrade.go | 1 + store/sqlstore/upgrade_test.go | 23 +++++++++++------------ 3 files changed, 13 insertions(+), 12 deletions(-) diff --git a/model/version.go b/model/version.go index 6fc625b95b..8f783a17ae 100644 --- a/model/version.go +++ b/model/version.go @@ -13,6 +13,7 @@ import ( // It should be maintained in chronological order with most current // release at the front of the list. 
var versions = []string{ + "5.16.0", "5.15.0", "5.14.0", "5.13.0", diff --git a/store/sqlstore/upgrade.go b/store/sqlstore/upgrade.go index e118f4e2a6..b6e43f20c8 100644 --- a/store/sqlstore/upgrade.go +++ b/store/sqlstore/upgrade.go @@ -20,6 +20,7 @@ import ( ) const ( + CURRENT_SCHEMA_VERSION = VERSION_5_15_0 VERSION_5_16_0 = "5.16.0" VERSION_5_15_0 = "5.15.0" VERSION_5_14_0 = "5.14.0" diff --git a/store/sqlstore/upgrade_test.go b/store/sqlstore/upgrade_test.go index eae894eff4..47c2dc3075 100644 --- a/store/sqlstore/upgrade_test.go +++ b/store/sqlstore/upgrade_test.go @@ -6,7 +6,6 @@ package sqlstore import ( "testing" - "github.com/mattermost/mattermost-server/model" "github.com/mattermost/mattermost-server/store" "github.com/stretchr/testify/require" ) @@ -36,25 +35,25 @@ func TestStoreUpgrade(t *testing.T) { t.Run("upgrade from earliest supported version", func(t *testing.T) { saveSchemaVersion(sqlStore, VERSION_3_0_0) - err := UpgradeDatabase(sqlStore, model.CurrentVersion) + err := UpgradeDatabase(sqlStore, CURRENT_SCHEMA_VERSION) require.NoError(t, err) - require.Equal(t, model.CurrentVersion, sqlStore.GetCurrentSchemaVersion()) + require.Equal(t, CURRENT_SCHEMA_VERSION, sqlStore.GetCurrentSchemaVersion()) }) t.Run("upgrade from no existing version", func(t *testing.T) { saveSchemaVersion(sqlStore, "") - err := UpgradeDatabase(sqlStore, model.CurrentVersion) + err := UpgradeDatabase(sqlStore, CURRENT_SCHEMA_VERSION) require.NoError(t, err) - require.Equal(t, model.CurrentVersion, sqlStore.GetCurrentSchemaVersion()) + require.Equal(t, CURRENT_SCHEMA_VERSION, sqlStore.GetCurrentSchemaVersion()) }) t.Run("upgrade schema running earlier minor version", func(t *testing.T) { saveSchemaVersion(sqlStore, "5.1.0") err := UpgradeDatabase(sqlStore, "5.8.0") require.NoError(t, err) - // Assert model.CurrentVersion, not 5.8.0, since the migrations will move + // Assert CURRENT_SCHEMA_VERSION, not 5.8.0, since the migrations will move // past 5.8.0 regardless of the 
input parameter. - require.Equal(t, model.CurrentVersion, sqlStore.GetCurrentSchemaVersion()) + require.Equal(t, CURRENT_SCHEMA_VERSION, sqlStore.GetCurrentSchemaVersion()) }) t.Run("upgrade schema running later minor version", func(t *testing.T) { @@ -66,9 +65,9 @@ func TestStoreUpgrade(t *testing.T) { t.Run("upgrade schema running earlier major version", func(t *testing.T) { saveSchemaVersion(sqlStore, "4.1.0") - err := UpgradeDatabase(sqlStore, model.CurrentVersion) + err := UpgradeDatabase(sqlStore, CURRENT_SCHEMA_VERSION) require.NoError(t, err) - require.Equal(t, model.CurrentVersion, sqlStore.GetCurrentSchemaVersion()) + require.Equal(t, CURRENT_SCHEMA_VERSION, sqlStore.GetCurrentSchemaVersion()) }) t.Run("upgrade schema running later major version", func(t *testing.T) { @@ -94,12 +93,12 @@ func TestSaveSchemaVersion(t *testing.T) { }) t.Run("set current version", func(t *testing.T) { - saveSchemaVersion(sqlStore, model.CurrentVersion) + saveSchemaVersion(sqlStore, CURRENT_SCHEMA_VERSION) props, err := ss.System().Get() require.Nil(t, err) - require.Equal(t, model.CurrentVersion, props["Version"]) - require.Equal(t, model.CurrentVersion, sqlStore.GetCurrentSchemaVersion()) + require.Equal(t, CURRENT_SCHEMA_VERSION, props["Version"]) + require.Equal(t, CURRENT_SCHEMA_VERSION, sqlStore.GetCurrentSchemaVersion()) }) }) } From 581cdf158c0fd354f39d20149db18020da514541 Mon Sep 17 00:00:00 2001 From: Nikhil Ranjan Date: Wed, 18 Sep 2019 18:38:28 +0200 Subject: [PATCH 52/53] Convert app/license_test.go t.Fatal calls into assert/require calls (#12218) --- app/license_test.go | 33 ++++++++++++--------------------- 1 file changed, 12 insertions(+), 21 deletions(-) diff --git a/app/license_test.go b/app/license_test.go index 225d4e54d9..83bfc158de 100644 --- a/app/license_test.go +++ b/app/license_test.go @@ -8,6 +8,7 @@ import ( "github.com/mattermost/mattermost-server/model" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func 
TestLoadLicense(t *testing.T) { @@ -15,9 +16,7 @@ func TestLoadLicense(t *testing.T) { defer th.TearDown() th.App.LoadLicense() - if th.App.License() != nil { - t.Fatal("shouldn't have a valid license") - } + require.Nil(t, th.App.License(), "shouldn't have a valid license") } func TestSaveLicense(t *testing.T) { @@ -26,18 +25,16 @@ func TestSaveLicense(t *testing.T) { b1 := []byte("junk") - if _, err := th.App.SaveLicense(b1); err == nil { - t.Fatal("shouldn't have saved license") - } + _, err := th.App.SaveLicense(b1) + require.NotNil(t, err, "shouldn't have saved license") } func TestRemoveLicense(t *testing.T) { th := Setup(t) defer th.TearDown() - if err := th.App.RemoveLicense(); err != nil { - t.Fatal("should have removed license") - } + err := th.App.RemoveLicense() + require.Nil(t, err, "should have removed license") } func TestSetLicense(t *testing.T) { @@ -49,18 +46,16 @@ func TestSetLicense(t *testing.T) { l1.Customer = &model.Customer{} l1.StartsAt = model.GetMillis() - 1000 l1.ExpiresAt = model.GetMillis() + 100000 - if ok := th.App.SetLicense(l1); !ok { - t.Fatal("license should have worked") - } + ok := th.App.SetLicense(l1) + require.True(t, ok, "license should have worked") l3 := &model.License{} l3.Features = &model.Features{} l3.Customer = &model.Customer{} l3.StartsAt = model.GetMillis() + 10000 l3.ExpiresAt = model.GetMillis() + 100000 - if ok := th.App.SetLicense(l3); !ok { - t.Fatal("license should have passed") - } + ok = th.App.SetLicense(l3) + require.True(t, ok, "license should have passed") } func TestClientLicenseEtag(t *testing.T) { @@ -72,16 +67,12 @@ func TestClientLicenseEtag(t *testing.T) { th.App.SetClientLicense(map[string]string{"SomeFeature": "true", "IsLicensed": "true"}) etag2 := th.App.GetClientLicenseEtag(false) - if etag1 == etag2 { - t.Fatal("etags should not match") - } + require.NotEqual(t, etag1, etag2, "etags should not match") th.App.SetClientLicense(map[string]string{"SomeFeature": "true", "IsLicensed": "false"}) 
etag3 := th.App.GetClientLicenseEtag(false) - if etag2 == etag3 { - t.Fatal("etags should not match") - } + require.NotEqual(t, etag2, etag3, "etags should not match") } func TestGetSanitizedClientLicense(t *testing.T) { From c7b583ccdd894cff8b846f9487b84560cc38897f Mon Sep 17 00:00:00 2001 From: Carlos Tadeu Panato Junior Date: Wed, 18 Sep 2019 18:39:01 +0200 Subject: [PATCH 53/53] use mkdirall instead of mkdir (#12032) * use mkdirall instead of mkdir * revert * create the folder here --- api4/apitestlib.go | 11 ----------- testlib/resources.go | 12 +++++++++--- 2 files changed, 9 insertions(+), 14 deletions(-) diff --git a/api4/apitestlib.go b/api4/apitestlib.go index 32aa279c67..ba0a3fb24f 100644 --- a/api4/apitestlib.go +++ b/api4/apitestlib.go @@ -9,7 +9,6 @@ import ( "net" "net/http" "os" - "path/filepath" "reflect" "strconv" "strings" @@ -138,16 +137,6 @@ func setupTestHelper(enterprise bool, updateConfig func(*model.Config)) *TestHel th.tempWorkspace = dir } - pluginDir := filepath.Join(th.tempWorkspace, "plugins") - webappDir := filepath.Join(th.tempWorkspace, "webapp") - - th.App.UpdateConfig(func(cfg *model.Config) { - *cfg.PluginSettings.Directory = pluginDir - *cfg.PluginSettings.ClientDirectory = webappDir - }) - - th.App.InitPlugins(pluginDir, webappDir) - return th } diff --git a/testlib/resources.go b/testlib/resources.go index 6a253217f6..8f058df58d 100644 --- a/testlib/resources.go +++ b/testlib/resources.go @@ -111,6 +111,12 @@ func SetupTestResources() (string, error) { return "", errors.Wrapf(err, "failed to create plugins directory %s", pluginsDir) } + clientDir := path.Join(tempDir, "client") + err = os.Mkdir(clientDir, 0700) + if err != nil { + return "", errors.Wrapf(err, "failed to create client directory %s", clientDir) + } + err = setupConfig(path.Join(tempDir, "config")) if err != nil { return "", errors.Wrap(err, "failed to setup config") @@ -168,10 +174,10 @@ func setupConfig(configDir string) error { return errors.Wrapf(err, "failed 
to create config directory %s", configDir) } - configJson := path.Join(configDir, "config.json") - err = ioutil.WriteFile(configJson, []byte(config.ToJson()), 0644) + configJSON := path.Join(configDir, "config.json") + err = ioutil.WriteFile(configJSON, []byte(config.ToJson()), 0644) if err != nil { - return errors.Wrapf(err, "failed to write config to %s", configJson) + return errors.Wrapf(err, "failed to write config to %s", configJSON) } return nil