Merge branch 'master' into MM-52097
Commit: f147b37dca
.github/workflows/channels-ci.yml (vendored), 10 lines changed
@@ -83,6 +83,16 @@ jobs:
           npm run mmjstool -- i18n clean-empty --webapp-dir ./src --mobile-dir /tmp/fake-mobile-dir --check
           npm run mmjstool -- i18n check-empty-src --webapp-dir ./src --mobile-dir /tmp/fake-mobile-dir
           rm -rf tmp
+      - name: ci/lint-boards
+        working-directory: webapp/boards
+        run: |
+          npm run i18n-extract
+          git --no-pager diff --exit-code i18n/en.json || (echo "Please run \"cd webapp/boards && npm run i18n-extract\" and commit the changes in webapp/boards/i18n/en.json." && exit 1)
+      - name: ci/lint-playbooks
+        working-directory: webapp/playbooks
+        run: |
+          npm run i18n-extract
+          git --no-pager diff --exit-code i18n/en.json || (echo "Please run \"cd webapp/playbooks && npm run i18n-extract\" and commit the changes in webapp/playbooks/i18n/en.json." && exit 1)
   check-types:
     runs-on: ubuntu-22.04
     defaults:
.github/workflows/esrupgrade-common.yml (vendored, new file), 159 lines
@@ -0,0 +1,159 @@
name: ESR Upgrade
on:
  workflow_call:
    inputs:
      db-dump-url:
        required: true
        type: string
      initial-version:
        required: true
        type: string
      final-version:
        required: true
        type: string
env:
  COMPOSE_PROJECT_NAME: ghactions
  BUILD_IMAGE: mattermost/mattermost-enterprise-edition:${{ inputs.final-version }}
  MYSQL_CONN_ARGS: -h localhost -P 3306 --protocol=tcp -ummuser -pmostest mattermost_test
  DUMP_SERVER_NAME: esr.${{ inputs.initial-version }}-${{ inputs.final-version }}.dump.server.sql
  DUMP_SCRIPT_NAME: esr.${{ inputs.initial-version }}-${{ inputs.final-version }}.dump.script.sql
  MIGRATION_SCRIPT: esr.${{ inputs.initial-version }}-${{ inputs.final-version }}.mysql.up.sql
  CLEANUP_SCRIPT: esr.${{ inputs.initial-version }}-${{ inputs.final-version }}.mysql.cleanup.sql
  PREPROCESS_SCRIPT: esr.common.mysql.preprocess.sql
  DIFF_NAME: esr.${{ inputs.initial-version }}-${{ inputs.final-version }}.diff
jobs:
  esr-upgrade-server:
    runs-on: ubuntu-latest-8-cores
    timeout-minutes: 30
    steps:
      - name: Checkout mattermost-server
        uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
      - name: Run docker compose
        run: |
          cd server/build
          docker-compose --no-ansi run --rm start_dependencies
          cat ../tests/test-data.ldif | docker-compose --no-ansi exec -T openldap bash -c 'ldapadd -x -D "cn=admin,dc=mm,dc=test,dc=com" -w mostest';
          docker-compose --no-ansi exec -T minio sh -c 'mkdir -p /data/mattermost-test';
          docker-compose --no-ansi ps
      - name: Wait for docker compose
        run: |
          until docker network inspect ghactions_mm-test; do echo "Waiting for Docker Compose Network..."; sleep 1; done;
          docker run --net ghactions_mm-test appropriate/curl:latest sh -c "until curl --max-time 5 --output - http://mysql:3306; do echo waiting for mysql; sleep 5; done;"
          docker run --net ghactions_mm-test appropriate/curl:latest sh -c "until curl --max-time 5 --output - http://elasticsearch:9200; do echo waiting for elasticsearch; sleep 5; done;"
      - name: Initialize the database with the source DB dump
        run: |
          curl ${{ inputs.db-dump-url }} | zcat | docker exec -i ghactions_mysql_1 mysql -AN $MYSQL_CONN_ARGS
      - name: Common preprocessing of the DB dump
        run: |
          cd server/scripts/esrupgrades
          docker exec -i ghactions_mysql_1 mysql -AN $MYSQL_CONN_ARGS < $PREPROCESS_SCRIPT
      - name: Pull EE image
        run: |
          docker pull $BUILD_IMAGE
      - name: Run migration through server
        run: |
          mkdir -p client/plugins
          cd server/build
          # Run the server in the background to trigger the migrations
          docker run --name mmserver \
            --net ghactions_mm-test \
            --ulimit nofile=8096:8096 \
            --env-file=dotenv/test.env \
            --env MM_SQLSETTINGS_DRIVERNAME="mysql" \
            --env MM_SQLSETTINGS_DATASOURCE="mmuser:mostest@tcp(mysql:3306)/mattermost_test?charset=utf8mb4,utf8&multiStatements=true" \
            -v ~/work/mattermost-server:/mattermost-server \
            -w /mattermost-server/mattermost-server \
            $BUILD_IMAGE &
          # In parallel, wait for the migrations to finish.
          # To verify this, we check that the server has finished the startup job through the log line "Server is listening on"
          until docker logs mmserver | grep "Server is listening on"; do \
            echo "Waiting for migrations to finish..."; \
            sleep 1; \
          done;
          # Make sure to stop the server. Also, redirect output to null;
          # otherwise, the name of the container gets written to the console, which is weird
          docker stop mmserver > /dev/null
      - name: Cleanup DB
        run: |
          cd server/scripts/esrupgrades
          docker exec -i ghactions_mysql_1 mysql -AN $MYSQL_CONN_ARGS < $CLEANUP_SCRIPT
      - name: Dump upgraded database
        run: |
          # Use --skip-opt to have each INSERT on one line.
          # Use --set-gtid-purged=OFF to suppress GTID-related statements.
          docker exec -i ghactions_mysql_1 mysqldump \
            --skip-opt --set-gtid-purged=OFF \
            $MYSQL_CONN_ARGS > $DUMP_SERVER_NAME
      - name: Cleanup dump and compress
        run: |
          # We skip the very last line, which simply contains the date of the dump
          head -n -1 ${DUMP_SERVER_NAME} | gzip > ${DUMP_SERVER_NAME}.gz
      - name: Upload dump
        uses: actions/upload-artifact@v3
        with:
          name: upgraded-dump-server
          path: ${{ env.DUMP_SERVER_NAME }}.gz
  esr-upgrade-script:
    runs-on: ubuntu-latest-8-cores
    timeout-minutes: 30
    steps:
      - name: Checkout mattermost-server
        uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3.3.0
      - name: Run docker compose
        run: |
          cd server/build
          docker-compose --no-ansi run --rm start_dependencies
          cat ../tests/test-data.ldif | docker-compose --no-ansi exec -T openldap bash -c 'ldapadd -x -D "cn=admin,dc=mm,dc=test,dc=com" -w mostest';
          docker-compose --no-ansi exec -T minio sh -c 'mkdir -p /data/mattermost-test';
          docker-compose --no-ansi ps
      - name: Wait for docker compose
        run: |
          until docker network inspect ghactions_mm-test; do echo "Waiting for Docker Compose Network..."; sleep 1; done;
          docker run --net ghactions_mm-test appropriate/curl:latest sh -c "until curl --max-time 5 --output - http://mysql:3306; do echo waiting for mysql; sleep 5; done;"
          docker run --net ghactions_mm-test appropriate/curl:latest sh -c "until curl --max-time 5 --output - http://elasticsearch:9200; do echo waiting for elasticsearch; sleep 5; done;"
      - name: Initialize the database with the source DB dump
        run: |
          curl ${{ inputs.db-dump-url }} | zcat | docker exec -i ghactions_mysql_1 mysql -AN $MYSQL_CONN_ARGS
      - name: Preprocess the DB dump
        run: |
          cd server/scripts/esrupgrades
          docker exec -i ghactions_mysql_1 mysql -AN $MYSQL_CONN_ARGS < $PREPROCESS_SCRIPT
      - name: Run migration through script
        run: |
          cd server/scripts/esrupgrades
          docker exec -i ghactions_mysql_1 mysql -AN $MYSQL_CONN_ARGS < $MIGRATION_SCRIPT
      - name: Cleanup DB
        run: |
          cd server/scripts/esrupgrades
          docker exec -i ghactions_mysql_1 mysql -AN $MYSQL_CONN_ARGS < $CLEANUP_SCRIPT
      - name: Dump upgraded database
        run: |
          docker exec -i ghactions_mysql_1 mysqldump --skip-opt --set-gtid-purged=OFF $MYSQL_CONN_ARGS > $DUMP_SCRIPT_NAME
      - name: Cleanup dump and compress
        run: |
          # We skip the very last line, which simply contains the date of the dump
          head -n -1 ${DUMP_SCRIPT_NAME} | gzip > ${DUMP_SCRIPT_NAME}.gz
      - name: Upload dump
        uses: actions/upload-artifact@v3
        with:
          name: upgraded-dump-script
          path: ${{ env.DUMP_SCRIPT_NAME }}.gz
  esr-upgrade-diff:
    runs-on: ubuntu-latest-8-cores
    needs:
      - esr-upgrade-server
      - esr-upgrade-script
    steps:
      - name: Retrieve dumps
        uses: actions/download-artifact@v3
      - name: Diff dumps
        run: |
          gzip -d upgraded-dump-server/${DUMP_SERVER_NAME}.gz
          gzip -d upgraded-dump-script/${DUMP_SCRIPT_NAME}.gz
          diff upgraded-dump-server/$DUMP_SERVER_NAME upgraded-dump-script/$DUMP_SCRIPT_NAME > $DIFF_NAME
      - name: Upload diff
        if: failure() # Upload the diff only if the previous step failed, i.e., if the diff is non-empty
        uses: actions/upload-artifact@v3
        with:
          name: dumps-diff
          path: ${{ env.DIFF_NAME }}
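The three jobs implement a differential test: the same source dump is upgraded once by booting the real server and once by the standalone SQL script, and the pipeline fails when the two results diverge. Two details make a plain line diff meaningful here: --skip-opt makes mysqldump emit one INSERT per row, and head -n -1 strips the final "Dump completed on ..." timestamp line that would otherwise differ on every run. The Go sketch below replays that comparison outside CI; the file names are illustrative, not produced by the workflow.

package main

import (
	"bufio"
	"fmt"
	"os"
)

// readDump loads a SQL dump, dropping the final line, which mysqldump
// uses for a "Dump completed on ..." timestamp that differs between runs.
func readDump(path string) ([]string, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	var lines []string
	sc := bufio.NewScanner(f)
	sc.Buffer(make([]byte, 1024*1024), 64*1024*1024) // allow very long INSERT lines
	for sc.Scan() {
		lines = append(lines, sc.Text())
	}
	if err := sc.Err(); err != nil {
		return nil, err
	}
	if n := len(lines); n > 0 {
		lines = lines[:n-1]
	}
	return lines, nil
}

func main() {
	server, err := readDump("esr.dump.server.sql") // illustrative path
	if err != nil {
		panic(err)
	}
	script, err := readDump("esr.dump.script.sql") // illustrative path
	if err != nil {
		panic(err)
	}

	// Because --skip-opt emits one INSERT per row, a line-level comparison
	// pinpoints the first divergence between the two upgrade paths.
	for i := 0; i < len(server) && i < len(script); i++ {
		if server[i] != script[i] {
			fmt.Printf("first difference at line %d:\n- %s\n+ %s\n", i+1, server[i], script[i])
			return
		}
	}
	if len(server) != len(script) {
		fmt.Printf("dumps differ in length: %d vs %d lines\n", len(server), len(script))
		return
	}
	fmt.Println("dumps are identical")
}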
.github/workflows/esrupgrade.yml (vendored, new file), 33 lines
@@ -0,0 +1,33 @@
name: ESR Upgrade
on:
  pull_request:
    paths:
      - 'server/scripts/esrupgrades/*'
      - '.github/workflows/esr*'
  push:
    branches:
      - master
      - cloud
      - release-*
jobs:
  esr-upgrade-5_37-7_8:
    name: Run ESR upgrade script from 5.37 to 7.8
    uses: ./.github/workflows/esrupgrade-common.yml
    with:
      db-dump-url: https://lt-public-data.s3.amazonaws.com/47K_537_mysql_collationfixed.sql.gz
      initial-version: 5.37
      final-version: 7.8
  esr-upgrade-5_37-6_3:
    name: Run ESR upgrade script from 5.37 to 6.3
    uses: ./.github/workflows/esrupgrade-common.yml
    with:
      db-dump-url: https://lt-public-data.s3.amazonaws.com/47K_537_mysql_collationfixed.sql.gz
      initial-version: 5.37
      final-version: 6.3
  esr-upgrade-6_3-7_8:
    name: Run ESR upgrade script from 6.3 to 7.8
    uses: ./.github/workflows/esrupgrade-common.yml
    with:
      db-dump-url: https://lt-public-data.s3.amazonaws.com/47K_63_mysql.sql.gz
      initial-version: 6.3
      final-version: 7.8
@@ -5,3 +5,6 @@
 /webapp/package-lock.json @mattermost/web-platform
 /webapp/platform/*/package.json @mattermost/web-platform
 /webapp/scripts @mattermost/web-platform
+/server/channels/db/migrations @mattermost/server-platform
+/server/boards/services/store/sqlstore/migrations @mattermost/server-platform
+/server/playbooks/server/sqlstore/migrations @mattermost/server-platform
File diff suppressed because one or more lines are too long
@@ -170,7 +170,6 @@ const defaultServerConfig: AdminConfig = {
        EnableCustomGroups: true,
        SelfHostedPurchase: true,
        AllowSyncedDrafts: true,
        SelfHostedExpansion: false,
    },
    TeamSettings: {
        SiteName: 'Mattermost',
@@ -665,7 +664,6 @@ const defaultServerConfig: AdminConfig = {
        BoardsFeatureFlags: '',
        BoardsDataRetention: false,
        NormalizeLdapDNs: false,
        UseCaseOnboarding: true,
        GraphQL: false,
        InsightsEnabled: true,
        CommandPalette: false,
@@ -1,4 +1,4 @@
-.PHONY: build package run stop run-client run-server run-haserver stop-haserver stop-client stop-server restart restart-server restart-client restart-haserver start-docker clean-dist clean nuke check-style check-client-style check-server-style check-unit-tests test dist run-client-tests setup-run-client-tests cleanup-run-client-tests test-client build-linux build-osx build-windows package-prep package-linux package-osx package-windows internal-test-web-client vet run-server-for-web-client-tests diff-config prepackaged-plugins prepackaged-binaries test-server test-server-ee test-server-quick test-server-race new-migration migrations-extract
+.PHONY: build package run stop run-client run-server run-haserver stop-haserver stop-client stop-server restart restart-server restart-client restart-haserver start-docker update-docker clean-dist clean nuke check-style check-client-style check-server-style check-unit-tests test dist run-client-tests setup-run-client-tests cleanup-run-client-tests test-client build-linux build-osx build-windows package-prep package-linux package-osx package-windows internal-test-web-client vet run-server-for-web-client-tests diff-config prepackaged-plugins prepackaged-binaries test-server test-server-ee test-server-quick test-server-race new-migration migrations-extract

 ROOT := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))

@@ -237,6 +237,11 @@ else
 endif
 endif

+update-docker: stop-docker ## Updates the docker containers for local development.
+	@echo Updating docker containers
+
+	$(GO) run ./build/docker-compose-generator/main.go $(ENABLED_DOCKER_SERVICES) | docker-compose -f docker-compose.makefile.yml -f /dev/stdin $(DOCKER_COMPOSE_OVERRIDE) up --no-start
+
 run-haserver:
 ifeq ($(BUILD_ENTERPRISE_READY),true)
 	@echo Starting mattermost in an HA topology '(3 node cluster)'
@@ -70,7 +70,10 @@ func (s *SQLStore) getMigrationConnection() (*sql.DB, error) {
 	}
 	*settings.DriverName = s.dbType

-	db := sqlstore.SetupConnection("master", connectionString, &settings)
+	db, err := sqlstore.SetupConnection("master", connectionString, &settings, sqlstore.DBPingAttempts)
+	if err != nil {
+		return nil, err
+	}

 	return db, nil
 }
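The interesting part of this change is the new failure mode: SetupConnection now reports an unreachable database as an error instead of failing hard, and callers pass a ping-attempt budget. The sketch below shows one plausible shape of that contract; it assumes DBPingAttempts simply bounds how many pings are tried before giving up, which is an assumption about the helper, not a copy of it.

package dbconn

import (
	"database/sql"
	"fmt"
	"time"
)

// setupConnection mirrors the shape of the new sqlstore.SetupConnection:
// it returns an error instead of panicking, and it retries Ping up to
// pingAttempts times so a database that is still starting up gets a grace
// period. The caller is expected to have registered a driver (e.g. mysql).
func setupConnection(driver, dsn string, pingAttempts int) (*sql.DB, error) {
	db, err := sql.Open(driver, dsn)
	if err != nil {
		return nil, fmt.Errorf("opening %s connection: %w", driver, err)
	}

	if pingAttempts < 1 {
		pingAttempts = 1
	}
	for attempt := 1; attempt <= pingAttempts; attempt++ {
		if err = db.Ping(); err == nil {
			return db, nil
		}
		time.Sleep(time.Second)
	}

	db.Close()
	return nil, fmt.Errorf("database unreachable after %d ping attempts: %w", pingAttempts, err)
}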
@@ -126,7 +126,7 @@ func (s *SQLStore) isSchemaMigrationNeeded() (bool, error) {
 	case model.MysqlDBType:
 		query = query.Where(sq.Eq{"TABLE_SCHEMA": s.schemaName})
 	case model.PostgresDBType:
-		query = query.Where(sq.Eq{"TABLE_SCHEMA": "current_schema()"})
+		query = query.Where("table_schema = current_schema()")
 	}

 	rows, err := query.Query()
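This one-line fix is easy to misread: squirrel's sq.Eq treats its value as data to bind, so the old code compared table_schema with the literal string "current_schema()" and the predicate could never match a real schema name on Postgres. Passing a raw SQL fragment keeps the function call inside the generated query. A minimal sketch with the Masterminds/squirrel builder (assumed here to be the one the store uses):

package main

import (
	"fmt"

	sq "github.com/Masterminds/squirrel"
)

func main() {
	// sq.Eq binds the value as a placeholder, so the query compares
	// table_schema against the *string* "current_schema()" and the
	// function is never called.
	wrong, args, _ := sq.Select("count(*)").
		From("information_schema.columns").
		Where(sq.Eq{"table_schema": "current_schema()"}).
		PlaceholderFormat(sq.Dollar).
		ToSql()
	fmt.Println(wrong, args)
	// SELECT count(*) FROM information_schema.columns WHERE table_schema = $1 ["current_schema()"]

	// A raw predicate keeps current_schema() as a SQL function call.
	right, args2, _ := sq.Select("count(*)").
		From("information_schema.columns").
		Where("table_schema = current_schema()").
		PlaceholderFormat(sq.Dollar).
		ToSql()
	fmt.Println(right, args2)
	// SELECT count(*) FROM information_schema.columns WHERE table_schema = current_schema() []
}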
@@ -58,7 +58,7 @@ RUN apt-get update \
     libxext6=2:1.3.3-1+b2 \
     libxrender1=1:0.9.10-1 \
     libcairo2=1.16.0-4+deb10u1 \
-    libcurl3-gnutls=7.64.0-4+deb10u5 \
+    libcurl3-gnutls=7.64.0-4+deb10u6 \
     libglib2.0-0=2.58.3-2+deb10u3 \
     libgsf-1-common=1.14.45-1 \
     libgsf-1-114=1.14.45-1 \
@@ -1093,12 +1093,6 @@ func CheckErrorMessage(tb testing.TB, err error, message string) {
 	require.Equalf(tb, message, appError.Message, "incorrect error message, actual: %s, expected: %s", appError.Id, message)
 }

-func CheckStartsWith(tb testing.TB, value, prefix, message string) {
-	tb.Helper()
-
-	require.True(tb, strings.HasPrefix(value, prefix), message, value)
-}
-
 // Similar to s3.New() but allows initialization of signature v2 or signature v4 client.
 // If signV2 input is false, function always returns signature v4.
 //
@@ -859,13 +859,22 @@ func TestGetPublicChannelsForTeam(t *testing.T) {
 	require.NoError(t, err)
 	require.Len(t, channels, 4, "wrong path")

-	for i, c := range channels {
+	var foundPublicChannel1, foundPublicChannel2 bool
+	for _, c := range channels {
 		// check all channels included are open
 		require.Equal(t, model.ChannelTypeOpen, c.Type, "should include open channel only")

 		// only check the created 2 public channels
-		require.False(t, i < 2 && !(c.DisplayName == publicChannel1.DisplayName || c.DisplayName == publicChannel2.DisplayName), "should match public channel display name")
+		switch c.DisplayName {
+		case publicChannel1.DisplayName:
+			foundPublicChannel1 = true
+		case publicChannel2.DisplayName:
+			foundPublicChannel2 = true
+		}
 	}

+	require.True(t, foundPublicChannel1, "failed to find publicChannel1")
+	require.True(t, foundPublicChannel2, "failed to find publicChannel2")
+
 	privateChannel := th.CreatePrivateChannel()
 	channels, _, err = client.GetPublicChannelsForTeam(team.Id, 0, 100, "")
@@ -1135,9 +1144,14 @@ func TestGetAllChannels(t *testing.T) {
 		require.NoError(t, err)
 		beforeCount := len(channels)

-		firstChannel := channels[0].Channel
+		deletedChannel := channels[0].Channel

-		_, err = client.DeleteChannel(firstChannel.Id)
+		// Never try to delete the default channel
+		if deletedChannel.Name == "town-square" {
+			deletedChannel = channels[1].Channel
+		}
+
+		_, err = client.DeleteChannel(deletedChannel.Id)
 		require.NoError(t, err)

 		channels, _, err = client.GetAllChannels(0, 10000, "")
@@ -1147,7 +1161,7 @@ func TestGetAllChannels(t *testing.T) {
 		}
 		require.NoError(t, err)
 		require.Len(t, channels, beforeCount-1)
-		require.NotContains(t, ids, firstChannel.Id)
+		require.NotContains(t, ids, deletedChannel.Id)

 		channels, _, err = client.GetAllChannelsIncludeDeleted(0, 10000, "")
 		ids = []string{}
@@ -1156,7 +1170,7 @@ func TestGetAllChannels(t *testing.T) {
 		}
 		require.NoError(t, err)
 		require.True(t, len(channels) > beforeCount)
-		require.Contains(t, ids, firstChannel.Id)
+		require.Contains(t, ids, deletedChannel.Id)
 	})

 	_, resp, err := client.GetAllChannels(0, 20, "")
@@ -103,6 +103,7 @@ func getSubscription(c *Context, w http.ResponseWriter, r *http.Request) {
 			DNS:             "",
+			LastInvoice:     &model.Invoice{},
 			DelinquentSince: subscription.DelinquentSince,
 			BillingType:     "",
 		}
 	}

@@ -329,13 +329,6 @@ func executeCommand(c *Context, w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	// For compatibility reasons, PermissionCreatePost is also checked.
-	// TODO: Remove in 8.0: https://mattermost.atlassian.net/browse/MM-51274
-	if !c.App.SessionHasPermissionToChannel(c.AppContext, *c.AppContext.Session(), commandArgs.ChannelId, model.PermissionUseSlashCommands) {
-		c.SetPermissionError(model.PermissionUseSlashCommands)
-		return
-	}
-
 	channel, err := c.App.GetChannel(c.AppContext, commandArgs.ChannelId)
 	if err != nil {
 		c.Err = err
@@ -354,13 +347,6 @@ func executeCommand(c *Context, w http.ResponseWriter, r *http.Request) {
 			c.SetPermissionError(model.PermissionCreatePost)
 			return
 		}

-		// For compatibility reasons, PermissionCreatePost is also checked.
-		// TODO: Remove in 8.0: https://mattermost.atlassian.net/browse/MM-51274
-		if !c.App.SessionHasPermissionTo(*c.AppContext.Session(), model.PermissionUseSlashCommands) {
-			c.SetPermissionError(model.PermissionUseSlashCommands)
-			return
-		}
 	}
 }

@@ -15,7 +15,6 @@ import (
 	"net/url"
 	"os"
 	"path/filepath"
-	"runtime"
 	"strings"
 	"testing"
 	"time"
@@ -790,6 +789,12 @@ func TestGetFileHeaders(t *testing.T) {
 		t.Skip("skipping because no file driver is enabled")
 	}

+	CheckStartsWith := func(tb testing.TB, value, prefix, message string) {
+		tb.Helper()
+
+		require.True(tb, strings.HasPrefix(value, prefix), fmt.Sprintf("%s: %s", message, value))
+	}
+
 	testHeaders := func(data []byte, filename string, expectedContentType string, getInline bool, loadFile bool) func(*testing.T) {
 		return func(t *testing.T) {
 			if loadFile {
@@ -832,11 +837,8 @@ func TestGetFileHeaders(t *testing.T) {
 	t.Run("txt", testHeaders(data, "test.txt", "text/plain", false, false))
 	t.Run("html", testHeaders(data, "test.html", "text/plain", false, false))
 	t.Run("js", testHeaders(data, "test.js", "text/plain", false, false))
-	if os.Getenv("IS_CI") == "true" {
-		t.Run("go", testHeaders(data, "test.go", "application/octet-stream", false, false))
-	} else if runtime.GOOS == "linux" || runtime.GOOS == "darwin" {
-		t.Run("go", testHeaders(data, "test.go", "text/x-go; charset=utf-8", false, false))
-	}
+	// *.go are categorized differently by different platforms
+	// t.Run("go", testHeaders(data, "test.go", "text/x-go; charset=utf-8", false, false))
 	t.Run("zip", testHeaders(data, "test.zip", "application/zip", false, false))
 	// Not every platform can recognize these
 	//t.Run("exe", testHeaders(data, "test.exe", "application/x-ms", false))
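The deleted branch pinned a different expected content type per environment; the replacement comment acknowledges that the mapping for *.go files comes from the platform's MIME tables, so no single expectation is portable. A small illustration of that variability (the output differs by machine):

package example

import (
	"fmt"
	"mime"
)

// TypeByExtension consults the operating system's MIME database, so the
// same extension can resolve to "text/x-go; charset=utf-8" on one machine
// and to nothing at all (an empty string) on another. That variability is
// why the "go" subtest above is kept as a comment rather than pinned to
// one value.
func describeGoMime() {
	fmt.Printf("*.go resolves to %q on this machine\n", mime.TypeByExtension(".go"))
}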
@@ -10,7 +10,6 @@ import (
 	"fmt"
 	"io"
 	"net/http"
-	"reflect"
 	"time"

 	"github.com/pkg/errors"
@@ -32,6 +31,8 @@ func (api *API) InitHostedCustomer() {
 	api.BaseRoutes.HostedCustomer.Handle("/customer", api.APISessionRequired(selfHostedCustomer)).Methods("POST")
 	// POST /api/v4/hosted_customer/confirm
 	api.BaseRoutes.HostedCustomer.Handle("/confirm", api.APISessionRequired(selfHostedConfirm)).Methods("POST")
+	// POST /api/v4/hosted_customer/confirm-expand
+	api.BaseRoutes.HostedCustomer.Handle("/confirm-expand", api.APISessionRequired(selfHostedConfirmExpand)).Methods("POST")
 	// GET /api/v4/hosted_customer/invoices
 	api.BaseRoutes.HostedCustomer.Handle("/invoices", api.APISessionRequired(selfHostedInvoices)).Methods("GET")
 	// GET /api/v4/hosted_customer/invoices/{invoice_id:in_[A-Za-z0-9]+}/pdf
@@ -172,6 +173,7 @@ func selfHostedConfirm(c *Context, w http.ResponseWriter, r *http.Request) {
 		c.Err = userErr
 		return
 	}

+	confirmResponse, err := c.App.Cloud().ConfirmSelfHostedSignup(confirm, user.Email)
 	if err != nil {
 		if confirmResponse != nil {
@@ -185,9 +187,8 @@ func selfHostedConfirm(c *Context, w http.ResponseWriter, r *http.Request) {
 		c.Err = model.NewAppError(where, "api.cloud.app_error", nil, "", http.StatusInternalServerError).Wrap(err)
 		return
 	}
-	license, err := c.App.Srv().Platform().SaveLicense([]byte(confirmResponse.License))
-	// dealing with an AppError
-	if !(reflect.ValueOf(err).Kind() == reflect.Ptr && reflect.ValueOf(err).IsNil()) {
+	license, appErr := c.App.Srv().Platform().SaveLicense([]byte(confirmResponse.License))
+	if appErr != nil {
 		if confirmResponse != nil {
 			c.App.NotifySelfHostedSignupProgress(confirmResponse.Progress, user.Id)
 		}
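The reflect dance the old code used was a workaround for Go's typed-nil pitfall: SaveLicense returns a concrete *model.AppError, and storing a nil concrete pointer in an error interface makes err != nil true even on success. Keeping the result in the concrete type, as the new license, appErr := ... form does, avoids the trap entirely. A self-contained sketch of the pitfall (AppError here is a stand-in type, not the real model.AppError):

package main

import "fmt"

// AppError stands in for a concrete error type returned by an API.
type AppError struct{ Msg string }

func (e *AppError) Error() string { return e.Msg }

// saveLicense returns a typed nil pointer on success.
func saveLicense(ok bool) *AppError {
	if ok {
		return nil
	}
	return &AppError{Msg: "save failed"}
}

func main() {
	var err error = saveLicense(true) // assign *AppError into an error interface

	// The interface now holds a non-nil type descriptor wrapping a nil
	// pointer, so a plain nil check is true even though the call succeeded.
	fmt.Println(err != nil) // true

	// Comparing the concrete type directly behaves as expected; this is
	// what the `license, appErr := ...; if appErr != nil` rewrite relies on.
	appErr := saveLicense(true)
	fmt.Println(appErr != nil) // false
}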
@@ -325,3 +326,80 @@ func handleSubscribeToNewsletter(c *Context, w http.ResponseWriter, r *http.Request) {

 	ReturnStatusOK(w)
 }
+
+func selfHostedConfirmExpand(c *Context, w http.ResponseWriter, r *http.Request) {
+	const where = "Api4.selfHostedConfirmExpand"
+
+	ensureSelfHostedAdmin(c, where)
+	if c.Err != nil {
+		return
+	}
+
+	if !checkSelfHostedPurchaseEnabled(c) {
+		c.Err = model.NewAppError(where, "api.cloud.app_error", nil, "", http.StatusNotImplemented)
+		return
+	}
+
+	bodyBytes, err := io.ReadAll(r.Body)
+	if err != nil {
+		c.Err = model.NewAppError(where, "api.cloud.app_error", nil, "", http.StatusBadRequest).Wrap(err)
+		return
+	}
+
+	var confirm model.SelfHostedConfirmPaymentMethodRequest
+	err = json.Unmarshal(bodyBytes, &confirm)
+	if err != nil {
+		c.Err = model.NewAppError(where, "api.cloud.request_error", nil, "", http.StatusBadRequest).Wrap(err)
+		return
+	}
+
+	user, userErr := c.App.GetUser(c.AppContext.Session().UserId)
+	if userErr != nil {
+		c.Err = userErr
+		return
+	}
+
+	confirmResponse, err := c.App.Cloud().ConfirmSelfHostedExpansion(confirm, user.Email)
+	if err != nil {
+		if confirmResponse != nil {
+			c.App.NotifySelfHostedSignupProgress(confirmResponse.Progress, user.Id)
+		}
+
+		if err.Error() == fmt.Sprintf("%d", http.StatusUnprocessableEntity) {
+			c.Err = model.NewAppError(where, "api.cloud.app_error", nil, "", http.StatusUnprocessableEntity).Wrap(err)
+			return
+		}
+		c.Err = model.NewAppError(where, "api.cloud.app_error", nil, "", http.StatusInternalServerError).Wrap(err)
+		return
+	}
+
+	license, appErr := c.App.Srv().Platform().SaveLicense([]byte(confirmResponse.License))
+	// dealing with an AppError
+	if appErr != nil {
+		if confirmResponse != nil {
+			c.App.NotifySelfHostedSignupProgress(confirmResponse.Progress, user.Id)
+		}
+		c.Err = model.NewAppError(where, "api.cloud.app_error", nil, "", http.StatusInternalServerError).Wrap(err)
+		return
+	}
+	clientResponse, err := json.Marshal(model.SelfHostedSignupConfirmClientResponse{
+		License:  utils.GetClientLicense(license),
+		Progress: confirmResponse.Progress,
+	})
+	if err != nil {
+		if confirmResponse != nil {
+			c.App.NotifySelfHostedSignupProgress(confirmResponse.Progress, user.Id)
+		}
+		c.Err = model.NewAppError(where, "api.cloud.app_error", nil, "", http.StatusInternalServerError).Wrap(err)
+		return
+	}
+
+	go func() {
+		err := c.App.Cloud().ConfirmSelfHostedSignupLicenseApplication()
+		if err != nil {
+			c.Logger.Warn("Unable to confirm license application", mlog.Err(err))
+		}
+	}()
+
+	_, _ = w.Write(clientResponse)
+}
@@ -22,7 +22,7 @@ func TestNotifyAdmin(t *testing.T) {
 		})

 		require.Error(t, err)
-		require.Equal(t, err.Error(), ": Unable to save notify data.")
+		require.Equal(t, ": Unable to save notify data.", err.Error())
 		require.Equal(t, http.StatusInternalServerError, statusCode)

 	})
@@ -38,7 +38,7 @@ func TestNotifyAdmin(t *testing.T) {
 		})

 		require.Error(t, err)
-		require.Equal(t, err.Error(), ": Unable to save notify data.")
+		require.Equal(t, ": Unable to save notify data.", err.Error())
 		require.Equal(t, http.StatusInternalServerError, statusCode)

 	})
@@ -53,7 +53,7 @@ func TestNotifyAdmin(t *testing.T) {
 		})

 		require.Error(t, err)
-		require.Equal(t, err.Error(), ": Unable to save notify data.")
+		require.Equal(t, ": Unable to save notify data.", err.Error())
 		require.Equal(t, http.StatusInternalServerError, statusCode)
 	})

@@ -68,7 +68,7 @@ func TestNotifyAdmin(t *testing.T) {
 		})

 		require.Error(t, err)
-		require.Equal(t, err.Error(), ": Unable to save notify data.")
+		require.Equal(t, ": Unable to save notify data.", err.Error())
 		require.Equal(t, http.StatusInternalServerError, statusCode)
 	})

@@ -90,7 +90,7 @@ func TestNotifyAdmin(t *testing.T) {
 		})
 		require.Error(t, err)

-		require.Equal(t, err.Error(), ": Already notified admin")
+		require.Equal(t, ": Already notified admin", err.Error())
 		require.Equal(t, http.StatusForbidden, statusCode)
 	})

@@ -118,7 +118,7 @@ func TestTriggerNotifyAdmin(t *testing.T) {
 		statusCode, err := th.SystemAdminClient.TriggerNotifyAdmin(&model.NotifyAdminToUpgradeRequest{})

 		require.Error(t, err)
-		require.Equal(t, err.Error(), ": Internal error during cloud api request.")
+		require.Equal(t, ": Internal error during cloud api request.", err.Error())
 		require.Equal(t, http.StatusForbidden, statusCode)

 	})
@@ -132,7 +132,7 @@ func TestTriggerNotifyAdmin(t *testing.T) {
 		statusCode, err := th.Client.TriggerNotifyAdmin(&model.NotifyAdminToUpgradeRequest{})

 		require.Error(t, err)
-		require.Equal(t, err.Error(), ": You do not have the appropriate permissions.")
+		require.Equal(t, ": You do not have the appropriate permissions.", err.Error())
 		require.Equal(t, http.StatusForbidden, statusCode)
 	})
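These swaps are not cosmetic: testify's require.Equal takes (t, expected, actual), and the original calls had the arguments reversed, so any failure would have labeled the observed error message as the expectation. A small sketch of the convention (computeStatus is a made-up helper):

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// computeStatus stands in for whatever call is under test.
func computeStatus() string { return "active" }

func TestExpectedComesFirst(t *testing.T) {
	got := computeStatus()

	// Correct order: the literal expectation first, the observed value
	// second. On failure testify prints expected/actual with the labels
	// the right way around.
	require.Equal(t, "active", got)

	// Reversed arguments compare the same values, but a failure would
	// report the observed value as "expected", which misleads debugging:
	// require.Equal(t, got, "active")
}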
@@ -76,24 +76,6 @@ func TestPlugin(t *testing.T) {
 		_, err = client.RemovePlugin(manifest.Id)
 		require.NoError(t, err)

-		t.Run("install plugin from URL with slow response time", func(t *testing.T) {
-			if testing.Short() {
-				t.Skip("skipping test to install plugin from a slow response server")
-			}
-
-			// Install from URL - slow server to simulate longer bundle download times
-			slowTestServer := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {
-				time.Sleep(60 * time.Second) // Wait longer than the previous default 30 seconds timeout
-				res.WriteHeader(http.StatusOK)
-				res.Write(tarData)
-			}))
-			defer func() { slowTestServer.Close() }()
-
-			manifest, _, err = client.InstallPluginFromURL(slowTestServer.URL, true)
-			require.NoError(t, err)
-			assert.Equal(t, "testplugin", manifest.Id)
-		})
-
 		th.App.Channels().RemovePlugin(manifest.Id)

 		th.App.UpdateConfig(func(cfg *model.Config) { *cfg.PluginSettings.Enable = false })
@@ -121,6 +103,7 @@ func TestPlugin(t *testing.T) {
 		// Successful upload
 		manifest, _, err = client.UploadPlugin(bytes.NewReader(tarData))
 		require.NoError(t, err)
+		assert.Equal(t, "testplugin", manifest.Id)

 		th.App.UpdateConfig(func(cfg *model.Config) { *cfg.PluginSettings.EnableUploads = true })
@@ -1652,6 +1635,59 @@ func TestInstallMarketplacePlugin(t *testing.T) {
 			require.Nil(t, manifest)
 			assert.True(t, requestHandled)
 		}, "verify EnterprisePlugins is true for E20")
 }

+func TestInstallMarketplacePluginPrepackagedDisabled(t *testing.T) {
+	path, _ := fileutils.FindDir("tests")
+
+	signatureFilename := "testplugin2.tar.gz.sig"
+	signatureFileReader, err := os.Open(filepath.Join(path, signatureFilename))
+	require.NoError(t, err)
+	sigFile, err := io.ReadAll(signatureFileReader)
+	require.NoError(t, err)
+	pluginSignature := base64.StdEncoding.EncodeToString(sigFile)
+
+	tarData, err := os.ReadFile(filepath.Join(path, "testplugin2.tar.gz"))
+	require.NoError(t, err)
+	pluginServer := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {
+		res.WriteHeader(http.StatusOK)
+		res.Write(tarData)
+	}))
+	defer pluginServer.Close()
+
+	samplePlugins := []*model.MarketplacePlugin{
+		{
+			BaseMarketplacePlugin: &model.BaseMarketplacePlugin{
+				HomepageURL: "https://example.com/mattermost/mattermost-plugin-nps",
+				IconData:    "https://example.com/icon.svg",
+				DownloadURL: pluginServer.URL,
+				Manifest: &model.Manifest{
+					Id:               "testplugin2",
+					Name:             "testplugin2",
+					Description:      "a second plugin",
+					Version:          "1.2.2",
+					MinServerVersion: "",
+				},
+			},
+			InstalledVersion: "",
+		},
+		{
+			BaseMarketplacePlugin: &model.BaseMarketplacePlugin{
+				HomepageURL: "https://example.com/mattermost/mattermost-plugin-nps",
+				IconData:    "https://example.com/icon.svg",
+				DownloadURL: pluginServer.URL,
+				Manifest: &model.Manifest{
+					Id:               "testplugin2",
+					Name:             "testplugin2",
+					Description:      "a second plugin",
+					Version:          "1.2.3",
+					MinServerVersion: "",
+				},
+				Signature: pluginSignature,
+			},
+			InstalledVersion: "",
+		},
+	}
+
+	t.Run("install prepackaged and remote plugins through marketplace", func(t *testing.T) {
+		prepackagedPluginsDir := "prepackaged_plugins"
@@ -1669,13 +1705,13 @@ func TestInstallMarketplacePlugin(t *testing.T) {
 		err = testlib.CopyFile(filepath.Join(path, "testplugin.tar.gz.asc"), filepath.Join(prepackagedPluginsDir, "testplugin.tar.gz.sig"))
 		require.NoError(t, err)

-		th2 := SetupConfig(t, func(cfg *model.Config) {
+		th := SetupConfig(t, func(cfg *model.Config) {
 			// Disable auto-installing prepackaged plugins
 			*cfg.PluginSettings.AutomaticPrepackagedPlugins = false
 		}).InitBasic()
-		defer th2.TearDown()
+		defer th.TearDown()

-		th2.TestForSystemAdminAndLocal(t, func(t *testing.T, client *model.Client4) {
+		th.TestForSystemAdminAndLocal(t, func(t *testing.T, client *model.Client4) {
 			pluginSignatureFile, err := os.Open(filepath.Join(path, "testplugin.tar.gz.asc"))
 			require.NoError(t, err)
 			pluginSignatureData, err := io.ReadAll(pluginSignatureFile)
@@ -1683,36 +1719,48 @@ func TestInstallMarketplacePlugin(t *testing.T) {

 			key, err := os.Open(filepath.Join(path, "development-private-key.asc"))
 			require.NoError(t, err)
-			appErr := th2.App.AddPublicKey("pub_key", key)
+			appErr := th.App.AddPublicKey("pub_key", key)
 			require.Nil(t, appErr)

+			t.Cleanup(func() {
+				appErr = th.App.DeletePublicKey("pub_key")
+				require.Nil(t, appErr)
+			})
+
 			testServer := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {
 				serverVersion := req.URL.Query().Get("server_version")
 				require.NotEmpty(t, serverVersion)
 				require.Equal(t, model.CurrentVersion, serverVersion)
 				res.WriteHeader(http.StatusOK)

+				var out []byte
+
+				// Return something if testplugin2 or no specific plugin is requested
+				pluginID := req.URL.Query().Get("plugin_id")
+				if pluginID == "" || pluginID == samplePlugins[1].Manifest.Id {
+					out, err = json.Marshal([]*model.MarketplacePlugin{samplePlugins[1]})
+					require.NoError(t, err)
+				}
+
 				res.Write(out)
 			}))
 			defer testServer.Close()

-			th2.App.UpdateConfig(func(cfg *model.Config) {
+			th.App.UpdateConfig(func(cfg *model.Config) {
 				*cfg.PluginSettings.EnableMarketplace = true
 				*cfg.PluginSettings.EnableRemoteMarketplace = false
 				*cfg.PluginSettings.MarketplaceURL = testServer.URL
 				*cfg.PluginSettings.AllowInsecureDownloadURL = false
 			})

-			env := th2.App.GetPluginsEnvironment()
+			env := th.App.GetPluginsEnvironment()

 			pluginsResp, _, err := client.GetPlugins()
 			require.NoError(t, err)
 			require.Len(t, pluginsResp.Active, 0)
 			require.Len(t, pluginsResp.Inactive, 0)

-			// Should fail to install unknown prepackaged plugin
+			t.Run("Should fail to install unknown prepackaged plugin", func(t *testing.T) {
 				pRequest := &model.InstallMarketplacePluginRequest{Id: "testpluginXX"}
 				manifest, resp, err := client.InstallMarketplacePlugin(pRequest)
 				require.Error(t, err)
@@ -1728,36 +1776,73 @@ func TestInstallMarketplacePlugin(t *testing.T) {
 				require.NoError(t, err)
 				require.Len(t, pluginsResp.Active, 0)
 				require.Len(t, pluginsResp.Inactive, 0)
 			})

-			pRequest = &model.InstallMarketplacePluginRequest{Id: "testplugin"}
-			manifest1, _, err := client.InstallMarketplacePlugin(pRequest)
+			t.Run("Install prepackaged plugin with Marketplace disabled", func(t *testing.T) {
+				pRequest := &model.InstallMarketplacePluginRequest{Id: "testplugin"}
+				manifest, _, err := client.InstallMarketplacePlugin(pRequest)
 				require.NoError(t, err)
-				require.NotNil(t, manifest1)
-				require.Equal(t, "testplugin", manifest1.Id)
-				require.Equal(t, "0.0.1", manifest1.Version)
+				require.NotNil(t, manifest)
+				require.Equal(t, "testplugin", manifest.Id)
+				require.Equal(t, "0.0.1", manifest.Version)

+				t.Cleanup(func() {
+					_, err = client.RemovePlugin(manifest.Id)
+					require.NoError(t, err)
+				})

 				pluginsResp, _, err = client.GetPlugins()
 				require.NoError(t, err)
 				require.Len(t, pluginsResp.Active, 0)
 				require.Equal(t, pluginsResp.Inactive, []*model.PluginInfo{{
-					Manifest: *manifest1,
+					Manifest: *manifest,
 				}})
+			})

-			// Try to install remote marketplace plugin
-			pRequest = &model.InstallMarketplacePluginRequest{Id: "testplugin2"}
-			manifest, resp, err = client.InstallMarketplacePlugin(pRequest)
+			t.Run("Try to install remote marketplace plugin while Marketplace is disabled", func(t *testing.T) {
+				pRequest := &model.InstallMarketplacePluginRequest{Id: "testplugin2"}
+				manifest, resp, err := client.InstallMarketplacePlugin(pRequest)
 				require.Error(t, err)
 				CheckInternalErrorStatus(t, resp)
 				require.Nil(t, manifest)
+			})

 			// Enable remote marketplace
-			th2.App.UpdateConfig(func(cfg *model.Config) {
+			th.App.UpdateConfig(func(cfg *model.Config) {
 				*cfg.PluginSettings.EnableMarketplace = true
 				*cfg.PluginSettings.EnableRemoteMarketplace = true
 				*cfg.PluginSettings.MarketplaceURL = testServer.URL
 				*cfg.PluginSettings.AllowInsecureDownloadURL = true
 			})

+			t.Run("Install prepackaged, not listed plugin with Marketplace enabled", func(t *testing.T) {
+				pRequest := &model.InstallMarketplacePluginRequest{Id: "testplugin"}
+				manifest, _, err := client.InstallMarketplacePlugin(pRequest)
+				require.NoError(t, err)
+
+				t.Cleanup(func() {
+					_, err = client.RemovePlugin(manifest.Id)
+					require.NoError(t, err)
+				})
+
+				require.NotNil(t, manifest)
+				assert.Equal(t, "testplugin", manifest.Id)
+				assert.Equal(t, "0.0.1", manifest.Version)
+			})
+
+			t.Run("Install both a prepacked and a Marketplace plugin", func(t *testing.T) {
+				pRequest := &model.InstallMarketplacePluginRequest{Id: "testplugin"}
+				manifest1, _, err := client.InstallMarketplacePlugin(pRequest)
+				require.NoError(t, err)
+				require.NotNil(t, manifest1)
+				assert.Equal(t, "testplugin", manifest1.Id)
+				assert.Equal(t, "0.0.1", manifest1.Version)
+
+				t.Cleanup(func() {
+					_, err = client.RemovePlugin(manifest1.Id)
+					require.NoError(t, err)
+				})
+
 				pRequest = &model.InstallMarketplacePluginRequest{Id: "testplugin2"}
 				manifest2, _, err := client.InstallMarketplacePlugin(pRequest)
 				require.NoError(t, err)
@@ -1765,6 +1850,11 @@ func TestInstallMarketplacePlugin(t *testing.T) {
 				require.Equal(t, "testplugin2", manifest2.Id)
 				require.Equal(t, "1.2.3", manifest2.Version)

+				t.Cleanup(func() {
+					_, err = client.RemovePlugin(manifest2.Id)
+					require.NoError(t, err)
+				})
+
 				pluginsResp, _, err = client.GetPlugins()
 				require.NoError(t, err)
 				require.Len(t, pluginsResp.Active, 0)
@@ -1776,20 +1866,14 @@ func TestInstallMarketplacePlugin(t *testing.T) {
 					Manifest: *manifest2,
 				},
 			})
+			})

-			// Clean up
-			_, err = client.RemovePlugin(manifest1.Id)
-			require.NoError(t, err)
-
-			_, err = client.RemovePlugin(manifest2.Id)
-			require.NoError(t, err)
-
-			appErr = th2.App.DeletePublicKey("pub_key")
+			appErr = th.App.DeletePublicKey("pub_key")
 			require.Nil(t, appErr)
 		})
 	})

 	th.TestForSystemAdminAndLocal(t, func(t *testing.T, client *model.Client4) {
+		t.Run("missing prepackaged and remote plugin signatures", func(t *testing.T) {
 			prepackagedPluginsDir := "prepackaged_plugins"

 			os.RemoveAll(prepackagedPluginsDir)
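One pattern recurs throughout this refactor: cleanup moves from a trailing // Clean up block into t.Cleanup callbacks registered inside each subtest, so every subtest removes the plugin it installed even if an assertion fails halfway through. Cleanup functions run once their (sub)test finishes, last registered first. A minimal sketch:

package example

import (
	"fmt"
	"testing"
)

// TestCleanupOrder shows why registering removal with t.Cleanup beats a
// trailing cleanup block: the callbacks run when the enclosing (sub)test
// finishes, even if it failed early, and in last-in-first-out order.
func TestCleanupOrder(t *testing.T) {
	t.Run("install", func(t *testing.T) {
		t.Cleanup(func() { fmt.Println("remove plugin A") })
		t.Cleanup(func() { fmt.Println("remove plugin B") })
		fmt.Println("subtest body")
		// Prints: subtest body, remove plugin B, remove plugin A.
	})
	fmt.Println("outer test continues with a clean state")
}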
@@ -1809,6 +1893,7 @@ func TestInstallMarketplacePlugin(t *testing.T) {
 		}).InitBasic()
 		defer th.TearDown()

+		th.TestForSystemAdminAndLocal(t, func(t *testing.T, client *model.Client4) {
 			key, err := os.Open(filepath.Join(path, "development-private-key.asc"))
 			require.NoError(t, err)
 			appErr := th.App.AddPublicKey("pub_key", key)
@@ -1872,7 +1957,8 @@ func TestInstallMarketplacePlugin(t *testing.T) {
 			// Clean up
 			appErr = th.App.DeletePublicKey("pub_key")
 			require.Nil(t, appErr)
-		}, "missing prepackaged and remote plugin signatures")
+		})
+	})
 }

 func findClusterMessages(event model.ClusterEvent, msgs []*model.ClusterMessage) []*model.ClusterMessage {
@@ -452,32 +452,68 @@ func testCreatePostWithOutgoingHook(
}

func TestCreatePostWithOutgoingHook_form_urlencoded(t *testing.T) {
	t.Run("Case 1", func(t *testing.T) {
		testCreatePostWithOutgoingHook(t, "application/x-www-form-urlencoded", "application/x-www-form-urlencoded", "triggerword lorem ipsum", "triggerword", []string{"file_id_1"}, app.TriggerwordsExactMatch, false)
	})
	t.Run("Case 2", func(t *testing.T) {
		testCreatePostWithOutgoingHook(t, "application/x-www-form-urlencoded", "application/x-www-form-urlencoded", "triggerwordaaazzz lorem ipsum", "triggerword", []string{"file_id_1"}, app.TriggerwordsStartsWith, false)
	})
	t.Run("Case 3", func(t *testing.T) {
		testCreatePostWithOutgoingHook(t, "application/x-www-form-urlencoded", "application/x-www-form-urlencoded", "", "", []string{"file_id_1"}, app.TriggerwordsExactMatch, false)
	})
	t.Run("Case 4", func(t *testing.T) {
		testCreatePostWithOutgoingHook(t, "application/x-www-form-urlencoded", "application/x-www-form-urlencoded", "", "", []string{"file_id_1"}, app.TriggerwordsStartsWith, false)
	})
	t.Run("Case 5", func(t *testing.T) {
		testCreatePostWithOutgoingHook(t, "application/x-www-form-urlencoded", "application/x-www-form-urlencoded", "triggerword lorem ipsum", "triggerword", []string{"file_id_1"}, app.TriggerwordsExactMatch, true)
	})
	t.Run("Case 6", func(t *testing.T) {
		testCreatePostWithOutgoingHook(t, "application/x-www-form-urlencoded", "application/x-www-form-urlencoded", "triggerwordaaazzz lorem ipsum", "triggerword", []string{"file_id_1"}, app.TriggerwordsStartsWith, true)
	})
}

func TestCreatePostWithOutgoingHook_json(t *testing.T) {
	t.Run("Case 1", func(t *testing.T) {
		testCreatePostWithOutgoingHook(t, "application/json", "application/json", "triggerword lorem ipsum", "triggerword", []string{"file_id_1, file_id_2"}, app.TriggerwordsExactMatch, false)
	})
	t.Run("Case 2", func(t *testing.T) {
		testCreatePostWithOutgoingHook(t, "application/json", "application/json", "triggerwordaaazzz lorem ipsum", "triggerword", []string{"file_id_1, file_id_2"}, app.TriggerwordsStartsWith, false)
	})
	t.Run("Case 3", func(t *testing.T) {
		testCreatePostWithOutgoingHook(t, "application/json", "application/json", "triggerword lorem ipsum", "", []string{"file_id_1"}, app.TriggerwordsExactMatch, false)
	})
	t.Run("Case 4", func(t *testing.T) {
		testCreatePostWithOutgoingHook(t, "application/json", "application/json", "triggerwordaaazzz lorem ipsum", "", []string{"file_id_1"}, app.TriggerwordsStartsWith, false)
	})
	t.Run("Case 5", func(t *testing.T) {
		testCreatePostWithOutgoingHook(t, "application/json", "application/json", "triggerword lorem ipsum", "triggerword", []string{"file_id_1, file_id_2"}, app.TriggerwordsExactMatch, true)
	})
	t.Run("Case 6", func(t *testing.T) {
		testCreatePostWithOutgoingHook(t, "application/json", "application/json", "triggerwordaaazzz lorem ipsum", "", []string{"file_id_1"}, app.TriggerwordsStartsWith, true)
	})
}

// hooks created before we added the ContentType field should be considered as
// application/x-www-form-urlencoded
func TestCreatePostWithOutgoingHook_no_content_type(t *testing.T) {
	t.Run("Case 1", func(t *testing.T) {
		testCreatePostWithOutgoingHook(t, "", "application/x-www-form-urlencoded", "triggerword lorem ipsum", "triggerword", []string{"file_id_1"}, app.TriggerwordsExactMatch, false)
	})
	t.Run("Case 2", func(t *testing.T) {
		testCreatePostWithOutgoingHook(t, "", "application/x-www-form-urlencoded", "triggerwordaaazzz lorem ipsum", "triggerword", []string{"file_id_1"}, app.TriggerwordsStartsWith, false)
	})
	t.Run("Case 3", func(t *testing.T) {
		testCreatePostWithOutgoingHook(t, "", "application/x-www-form-urlencoded", "triggerword lorem ipsum", "", []string{"file_id_1, file_id_2"}, app.TriggerwordsExactMatch, false)
	})
	t.Run("Case 4", func(t *testing.T) {
		testCreatePostWithOutgoingHook(t, "", "application/x-www-form-urlencoded", "triggerwordaaazzz lorem ipsum", "", []string{"file_id_1, file_id_2"}, app.TriggerwordsStartsWith, false)
	})
	t.Run("Case 5", func(t *testing.T) {
		testCreatePostWithOutgoingHook(t, "", "application/x-www-form-urlencoded", "triggerword lorem ipsum", "triggerword", []string{"file_id_1"}, app.TriggerwordsExactMatch, true)
	})
	t.Run("Case 6", func(t *testing.T) {
		testCreatePostWithOutgoingHook(t, "", "application/x-www-form-urlencoded", "triggerword lorem ipsum", "", []string{"file_id_1, file_id_2"}, app.TriggerwordsExactMatch, true)
	})
}

@@ -3199,6 +3235,7 @@ func TestGetEditHistoryForPost(t *testing.T) {

 func TestCreatePostNotificationsWithCRT(t *testing.T) {
 	th := Setup(t).InitBasic()
 	defer th.TearDown()
+	rpost := th.CreatePost()

 	th.App.UpdateConfig(func(cfg *model.Config) {
@@ -892,6 +892,7 @@ func TestCompleteOnboarding(t *testing.T) {

 	req := &model.CompleteOnboardingRequest{
 		InstallPlugins: []string{"testplugin2"},
+		Organization:   "my-org",
 	}

 	t.Run("as a regular user", func(t *testing.T) {
@@ -1173,11 +1173,10 @@ func TestGetAllTeams(t *testing.T) {
 			}

 			var teams []*model.Team
-			var count int64
 			var resp *model.Response
 			var err2 error
 			if tc.WithCount {
-				teams, count, resp, err2 = client.GetAllTeamsWithTotalCount("", tc.Page, tc.PerPage)
+				teams, _, resp, err2 = client.GetAllTeamsWithTotalCount("", tc.Page, tc.PerPage)
 			} else {
 				teams, resp, err2 = client.GetAllTeams("", tc.Page, tc.PerPage)
 			}
@@ -1187,11 +1186,12 @@ func TestGetAllTeams(t *testing.T) {
 				return
 			}
 			require.NoError(t, err2)
-			require.Equal(t, len(tc.ExpectedTeams), len(teams))
-			for idx, team := range teams {
-				assert.Equal(t, tc.ExpectedTeams[idx], team.Id)
+
+			actualTeamIds := make([]string, 0, len(tc.ExpectedTeams))
+			for _, team := range teams {
+				actualTeamIds = append(actualTeamIds, team.Id)
 			}
-			require.Equal(t, tc.ExpectedCount, count)
+			require.ElementsMatch(t, tc.ExpectedTeams, actualTeamIds)
 		})
 	}
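The rewritten assertion drops the assumption that teams come back in a fixed order: collecting the IDs and comparing with ElementsMatch accepts any permutation that has the same elements with the same multiplicities. A minimal sketch:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestElementsMatchIgnoresOrder(t *testing.T) {
	expected := []string{"team-a", "team-b", "team-c"}
	got := []string{"team-c", "team-a", "team-b"} // pagination order may vary

	// Passes: same elements, same counts, any order. An index-by-index
	// comparison like the old loop would fail here for no good reason.
	require.ElementsMatch(t, expected, got)
}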
@@ -3106,6 +3106,10 @@ func getThreadForUser(c *Context, w http.ResponseWriter, r *http.Request) {
 		c.SetPermissionError(model.PermissionEditOtherUsers)
 		return
 	}
+	if !c.App.SessionHasPermissionToChannelByPost(*c.AppContext.Session(), c.Params.ThreadId, model.PermissionReadChannel) {
+		c.SetPermissionError(model.PermissionReadChannel)
+		return
+	}
 	extendedStr := r.URL.Query().Get("extended")
 	extended, _ := strconv.ParseBool(extendedStr)

@@ -3136,6 +3140,10 @@ func getThreadsForUser(c *Context, w http.ResponseWriter, r *http.Request) {
 		c.SetPermissionError(model.PermissionEditOtherUsers)
 		return
 	}
+	if !c.App.SessionHasPermissionToTeam(*c.AppContext.Session(), c.Params.TeamId, model.PermissionViewTeam) {
+		c.SetPermissionError(model.PermissionViewTeam)
+		return
+	}

 	options := model.GetUserThreadsOpts{
 		Since: 0,
@@ -3213,6 +3221,10 @@ func updateReadStateThreadByUser(c *Context, w http.ResponseWriter, r *http.Request) {
 		c.SetPermissionError(model.PermissionEditOtherUsers)
 		return
 	}
+	if !c.App.SessionHasPermissionToChannelByPost(*c.AppContext.Session(), c.Params.ThreadId, model.PermissionReadChannel) {
+		c.SetPermissionError(model.PermissionReadChannel)
+		return
+	}

 	thread, err := c.App.UpdateThreadReadForUser(c.AppContext, c.AppContext.Session().Id, c.Params.UserId, c.Params.TeamId, c.Params.ThreadId, c.Params.Timestamp)
 	if err != nil {
@@ -3279,6 +3291,10 @@ func unfollowThreadByUser(c *Context, w http.ResponseWriter, r *http.Request) {
 		c.SetPermissionError(model.PermissionEditOtherUsers)
 		return
 	}
+	if !c.App.SessionHasPermissionToChannelByPost(*c.AppContext.Session(), c.Params.ThreadId, model.PermissionReadChannel) {
+		c.SetPermissionError(model.PermissionReadChannel)
+		return
+	}

 	err := c.App.UpdateThreadFollowForUser(c.Params.UserId, c.Params.TeamId, c.Params.ThreadId, false)
 	if err != nil {
@@ -3338,6 +3354,10 @@ func updateReadStateAllThreadsByUser(c *Context, w http.ResponseWriter, r *http.Request) {
 		c.SetPermissionError(model.PermissionEditOtherUsers)
 		return
 	}
+	if !c.App.SessionHasPermissionToTeam(*c.AppContext.Session(), c.Params.TeamId, model.PermissionViewTeam) {
+		c.SetPermissionError(model.PermissionViewTeam)
+		return
+	}

 	err := c.App.UpdateThreadsReadForUser(c.Params.UserId, c.Params.TeamId)
 	if err != nil {
@@ -6360,6 +6360,15 @@ func TestGetThreadsForUser(t *testing.T) {
 		require.NoError(t, err)
 		require.Equal(t, uss.TotalUnreadThreads, int64(2))
 	})

+	t.Run("should error when not a team member", func(t *testing.T) {
+		th.UnlinkUserFromTeam(th.BasicUser, th.BasicTeam)
+		defer th.LinkUserToTeam(th.BasicUser, th.BasicTeam)
+
+		_, resp, err := th.Client.GetUserThreads(th.BasicUser.Id, th.BasicTeam.Id, model.GetUserThreadsOpts{})
+		require.Error(t, err)
+		CheckForbiddenStatus(t, resp)
+	})
 }

 func TestThreadSocketEvents(t *testing.T) {
@@ -6855,6 +6864,8 @@ func TestSingleThreadGet(t *testing.T) {
 	})

+	client := th.Client
+
 	t.Run("get single thread", func(t *testing.T) {
 		defer th.App.Srv().Store().Post().PermanentDeleteByUser(th.BasicUser.Id)
 		defer th.App.Srv().Store().Post().PermanentDeleteByUser(th.SystemAdminUser.Id)

@@ -6901,6 +6912,16 @@ func TestSingleThreadGet(t *testing.T) {
 		tr, _, err = th.Client.GetUserThread(th.BasicUser.Id, th.BasicTeam.Id, threads.Threads[0].PostId, true)
 		require.NoError(t, err)
 		require.Equal(t, true, tr.IsUrgent)
 	})

+	t.Run("should error when not a team member", func(t *testing.T) {
+		th.UnlinkUserFromTeam(th.BasicUser, th.BasicTeam)
+		defer th.LinkUserToTeam(th.BasicUser, th.BasicTeam)
+
+		_, resp, err := th.Client.GetUserThread(th.BasicUser.Id, th.BasicTeam.Id, model.NewId(), false)
+		require.Error(t, err)
+		CheckForbiddenStatus(t, resp)
+	})
 }

 func TestMaintainUnreadMentionsInThread(t *testing.T) {
@@ -7072,6 +7093,23 @@ func TestReadThreads(t *testing.T) {

 		checkThreadListReplies(t, th, th.Client, th.BasicUser.Id, 1, 1, nil)
 	})

+	t.Run("should error when not a team member", func(t *testing.T) {
+		th.UnlinkUserFromTeam(th.BasicUser, th.BasicTeam)
+		defer th.LinkUserToTeam(th.BasicUser, th.BasicTeam)
+
+		_, resp, err := th.Client.UpdateThreadReadForUser(th.BasicUser.Id, th.BasicTeam.Id, model.NewId(), model.GetMillis())
+		require.Error(t, err)
+		CheckForbiddenStatus(t, resp)
+
+		_, resp, err = th.Client.SetThreadUnreadByPostId(th.BasicUser.Id, th.BasicTeam.Id, model.NewId(), model.NewId())
+		require.Error(t, err)
+		CheckForbiddenStatus(t, resp)
+
+		resp, err = th.Client.UpdateThreadsReadForUser(th.BasicUser.Id, th.BasicTeam.Id)
+		require.Error(t, err)
+		CheckForbiddenStatus(t, resp)
+	})
 }

 func TestMarkThreadUnreadMentionCount(t *testing.T) {
@@ -424,10 +424,14 @@ func TestWebSocketUpgrade(t *testing.T) {
 	th := Setup(t)
 	defer th.TearDown()

+	buffer := &mlog.Buffer{}
+	err := mlog.AddWriterTarget(th.TestLogger, buffer, true, mlog.StdAll...)
+	require.NoError(t, err)
+
 	url := fmt.Sprintf("http://localhost:%v", th.App.Srv().ListenAddr.Port) + model.APIURLSuffix + "/websocket"
 	resp, err := http.Get(url)
 	require.NoError(t, err)
 	require.Equal(t, resp.StatusCode, http.StatusBadRequest)
 	require.NoError(t, th.TestLogger.Flush())
-	testlib.AssertLog(t, th.LogBuffer, mlog.LvlDebug.Name, "Failed to upgrade websocket connection.")
+	testlib.AssertLog(t, buffer, mlog.LvlDebug.Name, "Failed to upgrade websocket connection.")
 }
@@ -119,7 +119,6 @@ func TestDoAdvancedPermissionsMigration(t *testing.T) {
 			model.PermissionGetPublicLink.Id,
 			model.PermissionCreatePost.Id,
 			model.PermissionUseChannelMentions.Id,
-			model.PermissionUseSlashCommands.Id,
 			model.PermissionManagePublicChannelProperties.Id,
 			model.PermissionDeletePublicChannel.Id,
 			model.PermissionManagePrivateChannelProperties.Id,
@@ -2518,6 +2518,9 @@ func (a *App) removeUserFromChannel(c request.CTX, userIDToRemove string, remove
 	if err := a.Srv().Store().ChannelMemberHistory().LogLeaveEvent(userIDToRemove, channel.Id, model.GetMillis()); err != nil {
 		return model.NewAppError("removeUserFromChannel", "app.channel_member_history.log_leave_event.internal_error", nil, "", http.StatusInternalServerError).Wrap(err)
 	}
+	if err := a.Srv().Store().Thread().DeleteMembershipsForChannel(userIDToRemove, channel.Id); err != nil {
+		return model.NewAppError("removeUserFromChannel", model.NoTranslation, nil, "failed to delete threadmemberships upon leaving channel", http.StatusInternalServerError).Wrap(err)
+	}

 	if isGuest {
 		currentMembers, err := a.GetChannelMembersForUser(c, channel.TeamId, userIDToRemove)
@@ -609,6 +609,85 @@ func TestLeaveDefaultChannel(t *testing.T) {
 		_, err = th.App.GetChannelMember(th.Context, townSquare.Id, guest.Id)
 		assert.NotNil(t, err)
 	})

+	t.Run("Trying to leave the default channel should not delete thread memberships", func(t *testing.T) {
+		post := &model.Post{
+			ChannelId: townSquare.Id,
+			Message:   "root post",
+			UserId:    th.BasicUser.Id,
+		}
+		rpost, err := th.App.CreatePost(th.Context, post, th.BasicChannel, false, true)
+		require.Nil(t, err)
+
+		reply := &model.Post{
+			ChannelId: townSquare.Id,
+			Message:   "reply post",
+			UserId:    th.BasicUser.Id,
+			RootId:    rpost.Id,
+		}
+		_, err = th.App.CreatePost(th.Context, reply, th.BasicChannel, false, true)
+		require.Nil(t, err)
+
+		threads, err := th.App.GetThreadsForUser(th.BasicUser.Id, townSquare.TeamId, model.GetUserThreadsOpts{})
+		require.Nil(t, err)
+		require.Len(t, threads.Threads, 1)
+
+		err = th.App.LeaveChannel(th.Context, townSquare.Id, th.BasicUser.Id)
+		assert.NotNil(t, err, "It should fail to remove a regular user from the default channel")
+		assert.Equal(t, err.Id, "api.channel.remove.default.app_error")
+
+		threads, err = th.App.GetThreadsForUser(th.BasicUser.Id, townSquare.TeamId, model.GetUserThreadsOpts{})
+		require.Nil(t, err)
+		require.Len(t, threads.Threads, 1)
+	})
 }

+func TestLeaveChannel(t *testing.T) {
+	th := Setup(t).InitBasic()
+	defer th.TearDown()
+
+	createThread := func(channel *model.Channel) (rpost *model.Post) {
+		t.Helper()
+		post := &model.Post{
+			ChannelId: channel.Id,
+			Message:   "root post",
+			UserId:    th.BasicUser.Id,
+		}
+
+		rpost, err := th.App.CreatePost(th.Context, post, th.BasicChannel, false, true)
+		require.Nil(t, err)
+
+		reply := &model.Post{
+			ChannelId: channel.Id,
+			Message:   "reply post",
+			UserId:    th.BasicUser.Id,
+			RootId:    rpost.Id,
+		}
+		_, err = th.App.CreatePost(th.Context, reply, th.BasicChannel, false, true)
+		require.Nil(t, err)
+
+		return rpost
+	}
+
+	t.Run("thread memberships are deleted", func(t *testing.T) {
+		createThread(th.BasicChannel)
+		channel2 := th.createChannel(th.Context, th.BasicTeam, model.ChannelTypeOpen)
+		createThread(channel2)
+
+		threads, err := th.App.GetThreadsForUser(th.BasicUser.Id, th.BasicChannel.TeamId, model.GetUserThreadsOpts{})
+		require.Nil(t, err)
+		require.Len(t, threads.Threads, 2)
+
+		err = th.App.LeaveChannel(th.Context, th.BasicChannel.Id, th.BasicUser.Id)
+		require.Nil(t, err)
+
+		_, err = th.App.GetChannelMember(th.Context, th.BasicChannel.Id, th.BasicUser.Id)
+		require.NotNil(t, err, "It should remove channel membership")
+
+		threads, err = th.App.GetThreadsForUser(th.BasicUser.Id, th.BasicChannel.TeamId, model.GetUserThreadsOpts{})
+		require.Nil(t, err)
+		require.Len(t, threads.Threads, 1)
+	})
+}
+
 func TestLeaveLastChannel(t *testing.T) {
@ -459,7 +459,7 @@ func TestImportImportRole(t *testing.T) {
	// Try changing all the params and reimporting.
	data.DisplayName = ptrStr("new display name")
	data.Description = ptrStr("description")
	data.Permissions = &[]string{"use_slash_commands"}
	data.Permissions = &[]string{"manage_slash_commands"}

	err = th.App.importRole(th.Context, &data, false, true)
	require.Nil(t, err, "Should have succeeded. %v", err)
@ -71,8 +71,6 @@ func TestGetSanitizedClientLicense(t *testing.T) {
	assert.False(t, ok)
	_, ok = m["SkuName"]
	assert.False(t, ok)
	_, ok = m["SkuShortName"]
	assert.False(t, ok)
}

func TestGenerateRenewalToken(t *testing.T) {
@ -28,6 +28,24 @@ func (a *App) markAdminOnboardingComplete(c *request.Context) *model.AppError {
}

func (a *App) CompleteOnboarding(c *request.Context, request *model.CompleteOnboardingRequest) *model.AppError {
	isCloud := a.Srv().License() != nil && *a.Srv().License().Features.Cloud

	if !isCloud && request.Organization == "" {
		mlog.Error("No organization name provided for self hosted onboarding")
		return model.NewAppError("CompleteOnboarding", "api.error_no_organization_name_provided_for_self_hosted_onboarding", nil, "", http.StatusBadRequest)
	}

	if request.Organization != "" {
		err := a.Srv().Store().System().SaveOrUpdate(&model.System{
			Name:  model.SystemOrganizationName,
			Value: request.Organization,
		})
		if err != nil {
			// don't block onboarding because of that.
			a.Log().Error("failed to save organization name", mlog.Err(err))
		}
	}

	pluginsEnvironment := a.Channels().GetPluginsEnvironment()
	if pluginsEnvironment == nil {
		return a.markAdminOnboardingComplete(c)
30
server/channels/app/onboarding_test.go
Normal file
@ -0,0 +1,30 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

package app

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/mattermost/mattermost-server/server/v8/channels/app/request"
	mm_model "github.com/mattermost/mattermost-server/server/v8/model"
)

func TestOnboardingSavesOrganizationName(t *testing.T) {
	th := Setup(t)
	defer th.TearDown()

	err := th.App.CompleteOnboarding(&request.Context{}, &mm_model.CompleteOnboardingRequest{
		Organization: "Mattermost In Tests",
	})
	require.Nil(t, err)
	defer func() {
		th.App.Srv().Store().System().PermanentDeleteByName(mm_model.SystemOrganizationName)
	}()

	sys, storeErr := th.App.Srv().Store().System().GetByName(mm_model.SystemOrganizationName)
	require.NoError(t, storeErr)
	require.Equal(t, "Mattermost In Tests", sys.Value)
}
@ -114,7 +114,7 @@ func TestImportPermissions(t *testing.T) {
	}
	beforeCount = len(results)

	json := fmt.Sprintf(`{"display_name":"%v","name":"%v","description":"%v","scope":"%v","default_team_admin_role":"","default_team_user_role":"","default_channel_admin_role":"%v","default_channel_user_role":"%v","roles":[{"id":"yzfx3g9xjjfw8cqo6bpn33xr7o","name":"%v","display_name":"Channel Admin Role for Scheme my_scheme_1526475590","description":"","create_at":1526475589687,"update_at":1526475589687,"delete_at":0,"permissions":["manage_channel_roles"],"scheme_managed":true,"built_in":false},{"id":"a7s3cp4n33dfxbsrmyh9djao3a","name":"%v","display_name":"Channel User Role for Scheme my_scheme_1526475590","description":"","create_at":1526475589688,"update_at":1526475589688,"delete_at":0,"permissions":["read_channel","add_reaction","remove_reaction","manage_public_channel_members","upload_file","get_public_link","create_post","use_slash_commands","manage_private_channel_members","delete_post","edit_post"],"scheme_managed":true,"built_in":false}]}`, displayName, name, description, scope, roleName1, roleName2, roleName1, roleName2)
	json := fmt.Sprintf(`{"display_name":"%v","name":"%v","description":"%v","scope":"%v","default_team_admin_role":"","default_team_user_role":"","default_channel_admin_role":"%v","default_channel_user_role":"%v","roles":[{"id":"yzfx3g9xjjfw8cqo6bpn33xr7o","name":"%v","display_name":"Channel Admin Role for Scheme my_scheme_1526475590","description":"","create_at":1526475589687,"update_at":1526475589687,"delete_at":0,"permissions":["manage_channel_roles"],"scheme_managed":true,"built_in":false},{"id":"a7s3cp4n33dfxbsrmyh9djao3a","name":"%v","display_name":"Channel User Role for Scheme my_scheme_1526475590","description":"","create_at":1526475589688,"update_at":1526475589688,"delete_at":0,"permissions":["read_channel","add_reaction","remove_reaction","manage_public_channel_members","upload_file","get_public_link","create_post","manage_private_channel_members","delete_post","edit_post"],"scheme_managed":true,"built_in":false}]}`, displayName, name, description, scope, roleName1, roleName2, roleName1, roleName2)
	r := strings.NewReader(json)

	err := th.App.ImportPermissions(r)
@ -183,7 +183,7 @@ func TestImportPermissions_idempotentScheme(t *testing.T) {
	roleName1 := model.NewId()
	roleName2 := model.NewId()

	json := fmt.Sprintf(`{"display_name":"%v","name":"%v","description":"%v","scope":"%v","default_team_admin_role":"","default_team_user_role":"","default_channel_admin_role":"%v","default_channel_user_role":"%v","roles":[{"id":"yzfx3g9xjjfw8cqo6bpn33xr7o","name":"%v","display_name":"Channel Admin Role for Scheme my_scheme_1526475590","description":"","create_at":1526475589687,"update_at":1526475589687,"delete_at":0,"permissions":["manage_channel_roles"],"scheme_managed":true,"built_in":false},{"id":"a7s3cp4n33dfxbsrmyh9djao3a","name":"%v","display_name":"Channel User Role for Scheme my_scheme_1526475590","description":"","create_at":1526475589688,"update_at":1526475589688,"delete_at":0,"permissions":["read_channel","add_reaction","remove_reaction","manage_public_channel_members","upload_file","get_public_link","create_post","use_slash_commands","manage_private_channel_members","delete_post","edit_post"],"scheme_managed":true,"built_in":false}]}`, displayName, name, description, scope, roleName1, roleName2, roleName1, roleName2)
	json := fmt.Sprintf(`{"display_name":"%v","name":"%v","description":"%v","scope":"%v","default_team_admin_role":"","default_team_user_role":"","default_channel_admin_role":"%v","default_channel_user_role":"%v","roles":[{"id":"yzfx3g9xjjfw8cqo6bpn33xr7o","name":"%v","display_name":"Channel Admin Role for Scheme my_scheme_1526475590","description":"","create_at":1526475589687,"update_at":1526475589687,"delete_at":0,"permissions":["manage_channel_roles"],"scheme_managed":true,"built_in":false},{"id":"a7s3cp4n33dfxbsrmyh9djao3a","name":"%v","display_name":"Channel User Role for Scheme my_scheme_1526475590","description":"","create_at":1526475589688,"update_at":1526475589688,"delete_at":0,"permissions":["read_channel","add_reaction","remove_reaction","manage_public_channel_members","upload_file","get_public_link","create_post","manage_private_channel_members","delete_post","edit_post"],"scheme_managed":true,"built_in":false}]}`, displayName, name, description, scope, roleName1, roleName2, roleName1, roleName2)
	jsonl := strings.Repeat(json+"\n", 4)
	r := strings.NewReader(jsonl)

@ -226,7 +226,7 @@ func TestImportPermissions_schemeDeletedOnRoleFailure(t *testing.T) {
	roleName1 := model.NewId()
	roleName2 := model.NewId()

	jsonl := fmt.Sprintf(`{"display_name":"%v","name":"%v","description":"%v","scope":"%v","default_team_admin_role":"","default_team_user_role":"","default_channel_admin_role":"%v","default_channel_user_role":"%v","roles":[{"id":"yzfx3g9xjjfw8cqo6bpn33xr7o","name":"%v","display_name":"Channel Admin Role for Scheme my_scheme_1526475590","description":"","create_at":1526475589687,"update_at":1526475589687,"delete_at":0,"permissions":["manage_channel_roles"],"scheme_managed":true,"built_in":false},{"id":"a7s3cp4n33dfxbsrmyh9djao3a","name":"%v","display_name":"Channel User Role for Scheme my_scheme_1526475590","description":"","create_at":1526475589688,"update_at":1526475589688,"delete_at":0,"permissions":["read_channel","add_reaction","remove_reaction","manage_public_channel_members","upload_file","get_public_link","create_post","use_slash_commands","manage_private_channel_members","delete_post","edit_post"],"scheme_managed":true,"built_in":false}]}`, displayName, name, description, scope, roleName1, roleName2, roleName1, roleName2)
	jsonl := fmt.Sprintf(`{"display_name":"%v","name":"%v","description":"%v","scope":"%v","default_team_admin_role":"","default_team_user_role":"","default_channel_admin_role":"%v","default_channel_user_role":"%v","roles":[{"id":"yzfx3g9xjjfw8cqo6bpn33xr7o","name":"%v","display_name":"Channel Admin Role for Scheme my_scheme_1526475590","description":"","create_at":1526475589687,"update_at":1526475589687,"delete_at":0,"permissions":["manage_channel_roles"],"scheme_managed":true,"built_in":false},{"id":"a7s3cp4n33dfxbsrmyh9djao3a","name":"%v","display_name":"Channel User Role for Scheme my_scheme_1526475590","description":"","create_at":1526475589688,"update_at":1526475589688,"delete_at":0,"permissions":["read_channel","add_reaction","remove_reaction","manage_public_channel_members","upload_file","get_public_link","create_post","manage_private_channel_members","delete_post","edit_post"],"scheme_managed":true,"built_in":false}]}`, displayName, name, description, scope, roleName1, roleName2, roleName1, roleName2)
	r := strings.NewReader(jsonl)

	var results []*model.Scheme
@ -71,8 +71,6 @@ func TestGetSanitizedClientLicense(t *testing.T) {
	assert.False(t, ok)
	_, ok = m["SkuName"]
	assert.False(t, ok)
	_, ok = m["SkuShortName"]
	assert.False(t, ok)
}

func TestGenerateRenewalToken(t *testing.T) {
@ -203,10 +203,12 @@ func (ch *Channels) InstallMarketplacePlugin(request *model.InstallMarketplacePl
	if *ch.cfgSvc.Config().PluginSettings.EnableRemoteMarketplace {
		var plugin *model.BaseMarketplacePlugin
		plugin, appErr = ch.getRemoteMarketplacePlugin(request.Id, request.Version)
		if appErr != nil {
			return nil, appErr
		// The plugin might only be prepackaged and not on the Marketplace.
		if appErr != nil && appErr.Id != "app.plugin.marketplace_plugins.not_found.app_error" {
			mlog.Warn("Failed to reach Marketplace to install plugin", mlog.String("plugin_id", request.Id), mlog.Err(appErr))
		}

		if plugin != nil {
			var prepackagedVersion semver.Version
			if prepackagedPlugin != nil {
				var err error
@ -234,6 +236,7 @@ func (ch *Channels) InstallMarketplacePlugin(request *model.InstallMarketplacePl
				signatureFile = signature
			}
		}
	}

	if pluginFile == nil {
		return nil, model.NewAppError("InstallMarketplacePlugin", "app.plugin.marketplace_plugins.not_found.app_error", nil, "", http.StatusInternalServerError)
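Note the behavior change above: a Marketplace lookup failure no longer returns immediately. It is logged as a warning so installation can fall back to a prepackaged plugin, and the request only fails later, with the not-found error, if no plugin file is available at all.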
@ -93,6 +93,7 @@ var wt{{.MD5}} = &WorkTemplate{
			Illustration: "{{.Playbook.Illustration}}",
		},{{end}}{{if .Integration}}Integration: &Integration{
			ID:          "{{.Integration.ID}}",
			Recommended: {{.Integration.Recommended}},
		},{{end}}
	},
{{end}}
@ -45,8 +45,10 @@ content:
      illustration: "/static/worktemplates/playbooks/product_release.png"
  - integration:
      id: jira
      recommended: true
  - integration:
      id: github
      recommended: true
---
id: 'product_teams/goals_and_okrs:v1'
category: product_teams
@ -86,7 +88,7 @@ content:
      channel: channel-1674845108569
  - integration:
      id: zoom

      recommended: true
---
id: 'product_teams/bug_bash:v1'
category: product_teams
@ -120,6 +122,7 @@ content:
      playbook: playbook-1674844017943
  - integration:
      id: jira
      recommended: true
---
id: 'product_teams/sprint_planning:v1'
category: product_teams
@ -153,6 +156,7 @@ content:
      channel: channel-1674850783500
  - integration:
      id: zoom
      recommended: true
---
id: 'product_teams/product_roadmap:v1'
category: product_teams
@ -282,6 +286,7 @@ content:
      channel: channel-1674845108569
  - integration:
      id: zoom
      recommended: true
---
id: 'companywide/create_project:v1'
category: companywide
@ -316,10 +321,13 @@ content:
      channel: channel-1674851940114
  - integration:
      id: jira
      recommended: true
  - integration:
      id: github
      recommended: true
  - integration:
      id: zoom
      recommended: true
---
######################
# Leadership
@ -356,4 +364,4 @@ content:
      channel: channel-1674845108569
  - integration:
      id: zoom

      recommended: true
@ -109,6 +109,7 @@ func (wt WorkTemplate) ToModelWorkTemplate(t i18n.TranslateFunc) *model.WorkTemp
		mwt.Content = append(mwt.Content, model.WorkTemplateContent{
			Integration: &model.WorkTemplateIntegration{
				ID:          content.Integration.ID,
				Recommended: content.Integration.Recommended,
			},
		})
	}
@ -321,6 +322,7 @@ func (p *Playbook) Validate() error {

type Integration struct {
	ID          string `yaml:"id"`
	Recommended bool   `yaml:"recommended"`
}

func (i *Integration) Validate() error {
@ -149,11 +149,13 @@ var wt00a1b44a5831c0a3acb14787b3fdd352 = &WorkTemplate{
		{
			Integration: &Integration{
				ID:          "jira",
				Recommended: true,
			},
		},
		{
			Integration: &Integration{
				ID:          "github",
				Recommended: true,
			},
		},
	},
@ -215,6 +217,7 @@ var wt5baa68055bf9ea423273662e01ccc575 = &WorkTemplate{
		{
			Integration: &Integration{
				ID:          "zoom",
				Recommended: true,
			},
		},
	},
@ -266,6 +269,7 @@ var wtfeb56bc6a8f277c47b503bd1c92d830e = &WorkTemplate{
		{
			Integration: &Integration{
				ID:          "jira",
				Recommended: true,
			},
		},
	},
@ -318,6 +322,7 @@ var wt8d2ef53deac5517eb349dc5de6150196 = &WorkTemplate{
		{
			Integration: &Integration{
				ID:          "zoom",
				Recommended: true,
			},
		},
	},
@ -519,6 +524,7 @@ var wtf7b846d35810f8272eeb9a1a562025b5 = &WorkTemplate{
		{
			Integration: &Integration{
				ID:          "zoom",
				Recommended: true,
			},
		},
	},
@ -571,16 +577,19 @@ var wtb9ab412890c2410c7b49eec8f12e7edc = &WorkTemplate{
		{
			Integration: &Integration{
				ID:          "jira",
				Recommended: true,
			},
		},
		{
			Integration: &Integration{
				ID:          "github",
				Recommended: true,
			},
		},
		{
			Integration: &Integration{
				ID:          "zoom",
				Recommended: true,
			},
		},
	},
@ -633,6 +642,7 @@ var wt32ab773bfe021e3d4913931041552559 = &WorkTemplate{
		{
			Integration: &Integration{
				ID:          "zoom",
				Recommended: true,
			},
		},
	},
@ -212,6 +212,8 @@ channels/db/migrations/mysql/000105_remove_tokens.down.sql
channels/db/migrations/mysql/000105_remove_tokens.up.sql
channels/db/migrations/mysql/000106_fileinfo_channelid.down.sql
channels/db/migrations/mysql/000106_fileinfo_channelid.up.sql
channels/db/migrations/mysql/000107_threadmemberships_cleanup.down.sql
channels/db/migrations/mysql/000107_threadmemberships_cleanup.up.sql
channels/db/migrations/postgres/000001_create_teams.down.sql
channels/db/migrations/postgres/000001_create_teams.up.sql
channels/db/migrations/postgres/000002_create_team_members.down.sql
@ -424,3 +426,5 @@ channels/db/migrations/postgres/000105_remove_tokens.down.sql
channels/db/migrations/postgres/000105_remove_tokens.up.sql
channels/db/migrations/postgres/000106_fileinfo_channelid.down.sql
channels/db/migrations/postgres/000106_fileinfo_channelid.up.sql
channels/db/migrations/postgres/000107_threadmemberships_cleanup.down.sql
channels/db/migrations/postgres/000107_threadmemberships_cleanup.up.sql
channels/db/migrations/mysql/000107_threadmemberships_cleanup.down.sql
@ -0,0 +1 @@
-- Skipping it because the forward migrations are destructive
channels/db/migrations/mysql/000107_threadmemberships_cleanup.up.sql
@ -0,0 +1,5 @@
DELETE FROM
    tm USING ThreadMemberships AS tm
    JOIN Threads ON Threads.PostId = tm.PostId
WHERE
    (tm.UserId, Threads.ChannelId) NOT IN (SELECT UserId, ChannelId FROM ChannelMembers);
channels/db/migrations/postgres/000107_threadmemberships_cleanup.down.sql
@ -0,0 +1 @@
-- Skipping it because the forward migrations are destructive
channels/db/migrations/postgres/000107_threadmemberships_cleanup.up.sql
@ -0,0 +1,12 @@
DELETE FROM threadmemberships WHERE (postid, userid) IN (
    SELECT
        threadmemberships.postid,
        threadmemberships.userid
    FROM
        threadmemberships
        JOIN threads ON threads.postid = threadmemberships.postid
        LEFT JOIN channelmembers ON channelmembers.userid = threadmemberships.userid
        AND threads.channelid = channelmembers.channelid
    WHERE
        channelmembers.channelid IS NULL
);
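Both up migrations delete the same rows: ThreadMemberships whose thread lives in a channel the member no longer belongs to. The MySQL version uses a multi-table DELETE ... USING with a NOT IN check, while the Postgres version expresses effectively the same set as a LEFT JOIN anti-join (channelmembers.channelid IS NULL). The down migrations are intentionally no-ops, since the deleted rows cannot be restored.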
@ -37,6 +37,7 @@ type CloudInterface interface {
	BootstrapSelfHostedSignup(req model.BootstrapSelfHostedSignupRequest) (*model.BootstrapSelfHostedSignupResponse, error)
	CreateCustomerSelfHostedSignup(req model.SelfHostedCustomerForm, requesterEmail string) (*model.SelfHostedSignupCustomerResponse, error)
	ConfirmSelfHostedSignup(req model.SelfHostedConfirmPaymentMethodRequest, requesterEmail string) (*model.SelfHostedSignupConfirmResponse, error)
	ConfirmSelfHostedExpansion(req model.SelfHostedConfirmPaymentMethodRequest, requesterEmail string) (*model.SelfHostedSignupConfirmResponse, error)
	ConfirmSelfHostedSignupLicenseApplication() error
	GetSelfHostedInvoices() ([]*model.Invoice, error)
	GetSelfHostedInvoicePDF(invoiceID string) ([]byte, string, error)
@ -13,6 +13,7 @@ import (
type MetricsInterface interface {
	Register()
	RegisterDBCollector(db *sql.DB, name string)
	UnregisterDBCollector(db *sql.DB, name string)

	IncrementPostCreate()
	IncrementWebhookPost()
@ -94,6 +94,32 @@ func (_m *CloudInterface) ConfirmCustomerPayment(userID string, confirmRequest *
	return r0
}

// ConfirmSelfHostedExpansion provides a mock function with given fields: req, requesterEmail
func (_m *CloudInterface) ConfirmSelfHostedExpansion(req model.SelfHostedConfirmPaymentMethodRequest, requesterEmail string) (*model.SelfHostedSignupConfirmResponse, error) {
	ret := _m.Called(req, requesterEmail)

	var r0 *model.SelfHostedSignupConfirmResponse
	var r1 error
	if rf, ok := ret.Get(0).(func(model.SelfHostedConfirmPaymentMethodRequest, string) (*model.SelfHostedSignupConfirmResponse, error)); ok {
		return rf(req, requesterEmail)
	}
	if rf, ok := ret.Get(0).(func(model.SelfHostedConfirmPaymentMethodRequest, string) *model.SelfHostedSignupConfirmResponse); ok {
		r0 = rf(req, requesterEmail)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*model.SelfHostedSignupConfirmResponse)
		}
	}

	if rf, ok := ret.Get(1).(func(model.SelfHostedConfirmPaymentMethodRequest, string) error); ok {
		r1 = rf(req, requesterEmail)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// ConfirmSelfHostedSignup provides a mock function with given fields: req, requesterEmail
func (_m *CloudInterface) ConfirmSelfHostedSignup(req model.SelfHostedConfirmPaymentMethodRequest, requesterEmail string) (*model.SelfHostedSignupConfirmResponse, error) {
	ret := _m.Called(req, requesterEmail)
@ -319,6 +319,11 @@ func (_m *MetricsInterface) SetReplicaLagTime(node string, value float64) {
	_m.Called(node, value)
}

// UnregisterDBCollector provides a mock function with given fields: db, name
func (_m *MetricsInterface) UnregisterDBCollector(db *sql.DB, name string) {
	_m.Called(db, name)
}

type mockConstructorTestingTNewMetricsInterface interface {
	mock.TestingT
	Cleanup(func())
@ -10123,6 +10123,24 @@ func (s *OpenTracingLayerThreadStore) DeleteMembershipForUser(userId string, pos
	return err
}

func (s *OpenTracingLayerThreadStore) DeleteMembershipsForChannel(userID string, channelID string) error {
	origCtx := s.Root.Store.Context()
	span, newCtx := tracing.StartSpanWithParentByContext(s.Root.Store.Context(), "ThreadStore.DeleteMembershipsForChannel")
	s.Root.Store.SetContext(newCtx)
	defer func() {
		s.Root.Store.SetContext(origCtx)
	}()

	defer span.Finish()
	err := s.ThreadStore.DeleteMembershipsForChannel(userID, channelID)
	if err != nil {
		span.LogFields(spanlog.Error(err))
		ext.Error.Set(span, true)
	}

	return err
}

func (s *OpenTracingLayerThreadStore) DeleteOrphanedRows(limit int) (int64, error) {
	origCtx := s.Root.Store.Context()
	span, newCtx := tracing.StartSpanWithParentByContext(s.Root.Store.Context(), "ThreadStore.DeleteOrphanedRows")
@ -11563,6 +11563,27 @@ func (s *RetryLayerThreadStore) DeleteMembershipForUser(userId string, postID st

}

func (s *RetryLayerThreadStore) DeleteMembershipsForChannel(userID string, channelID string) error {

	tries := 0
	for {
		err := s.ThreadStore.DeleteMembershipsForChannel(userID, channelID)
		if err == nil {
			return nil
		}
		if !isRepeatableError(err) {
			return err
		}
		tries++
		if tries >= 3 {
			err = errors.Wrap(err, "giving up after 3 consecutive repeatable transaction failures")
			return err
		}
		timepkg.Sleep(100 * timepkg.Millisecond)
	}

}

func (s *RetryLayerThreadStore) DeleteOrphanedRows(limit int) (int64, error) {

	tries := 0
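Every method in the generated retry layer follows this same template. A minimal, self-contained sketch of the pattern in plain Go (names are illustrative, not part of the diff):

package main

import (
	"errors"
	"fmt"
	"time"
)

// retryIfRepeatable mirrors the retry-layer template above: retry an operation
// up to three times when it fails with a "repeatable" error (deadlock or
// serialization failure), sleeping briefly between attempts.
func retryIfRepeatable(op func() error, isRepeatable func(error) bool) error {
	tries := 0
	for {
		err := op()
		if err == nil {
			return nil
		}
		if !isRepeatable(err) {
			return err
		}
		tries++
		if tries >= 3 {
			return fmt.Errorf("giving up after 3 consecutive repeatable transaction failures: %w", err)
		}
		time.Sleep(100 * time.Millisecond)
	}
}

func main() {
	attempts := 0
	err := retryIfRepeatable(func() error {
		attempts++
		if attempts < 2 {
			return errors.New("deadlock detected") // repeatable on the first try
		}
		return nil
	}, func(error) bool { return true })
	fmt.Println(err, "after", attempts, "attempts") // <nil> after 2 attempts
}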
@ -3035,7 +3035,8 @@ func (s SqlChannelStore) Autocomplete(userID, term string, includeDeleted, isGue
			sq.Expr("t.id = tm.TeamId"),
			sq.Eq{"tm.UserId": userID},
		}).
		OrderBy("c.DisplayName")
		OrderBy("c.DisplayName").
		Limit(model.ChannelSearchDefaultLimit)

	if !includeDeleted {
		query = query.Where(sq.And{
@ -3073,7 +3074,7 @@ func (s SqlChannelStore) Autocomplete(userID, term string, includeDeleted, isGue
	channels := model.ChannelListWithTeamData{}
	err = s.GetReplicaX().Select(&channels, sql, args...)
	if err != nil {
		return nil, errors.Wrapf(err, "could not find channel with term=%s", term)
		return nil, errors.Wrapf(err, "could not find channel with term=%s", trimInput(term))
	}
	return channels, nil
}
@ -3186,7 +3187,7 @@ func (s SqlChannelStore) AutocompleteInTeamForSearch(teamID string, userID strin
	// query the database
	err = s.GetReplicaX().Select(&channels, sql, args...)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to find Channels with term='%s'", term)
		return nil, errors.Wrapf(err, "failed to find Channels with term='%s'", trimInput(term))
	}

	directChannels, err := s.autocompleteInTeamForSearchDirectMessages(userID, term)
@ -3242,7 +3243,7 @@ func (s SqlChannelStore) autocompleteInTeamForSearchDirectMessages(userID string
	// query the channel list from the database using SQLX
	channels := model.ChannelList{}
	if err := s.GetReplicaX().Select(&channels, sql, args...); err != nil {
		return nil, errors.Wrapf(err, "failed to find Channels with term='%s' (%s %% %v)", term, sql, args)
		return nil, errors.Wrapf(err, "failed to find Channels with term='%s'", trimInput(term))
	}

	return channels, nil
@ -3461,7 +3462,7 @@ func (s SqlChannelStore) SearchAllChannels(term string, opts store.ChannelSearch
	}
	channels := model.ChannelListWithTeamData{}
	if err2 := s.GetReplicaX().Select(&channels, queryString, args...); err2 != nil {
		return nil, 0, errors.Wrapf(err2, "failed to find Channels with term='%s'", term)
		return nil, 0, errors.Wrapf(err2, "failed to find Channels with term='%s'", trimInput(term))
	}

	var totalCount int64
@ -3474,7 +3475,7 @@ func (s SqlChannelStore) SearchAllChannels(term string, opts store.ChannelSearch
		return nil, 0, errors.Wrap(err, "channel_tosql")
	}
	if err2 := s.GetReplicaX().Get(&totalCount, queryString, args...); err2 != nil {
		return nil, 0, errors.Wrapf(err2, "failed to find Channels with term='%s'", term)
		return nil, 0, errors.Wrapf(err2, "failed to find Channels with term='%s'", trimInput(term))
	}
	} else {
		totalCount = int64(len(channels))
@ -3651,7 +3652,7 @@ func (s SqlChannelStore) performSearch(searchQuery sq.SelectBuilder, term string
	channels := model.ChannelList{}
	err = s.GetReplicaX().Select(&channels, sql, args...)
	if err != nil {
		return channels, errors.Wrapf(err, "failed to find Channels with term='%s'", term)
		return channels, errors.Wrapf(err, "failed to find Channels with term='%s'", trimInput(term))
	}

	return channels, nil
@ -3744,7 +3745,7 @@ func (s SqlChannelStore) SearchGroupChannels(userId, term string) (model.Channel

	groupChannels := model.ChannelList{}
	if err := s.GetReplicaX().Select(&groupChannels, sql, params...); err != nil {
		return nil, errors.Wrapf(err, "failed to find Channels with term='%s' and userId=%s", term, userId)
		return nil, errors.Wrapf(err, "failed to find Channels with term='%s' and userId=%s", trimInput(term), userId)
	}
	return groupChannels, nil
}
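All of the error paths above now pass the user-supplied search term through trimInput before it is logged; the helper itself is added near the end of this diff, next to SanitizeDataSource, and caps logged input at 50 characters. Autocomplete additionally caps its result set at model.ChannelSearchDefaultLimit.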
@ -335,7 +335,7 @@ func (s SqlChannelStore) CreateSidebarCategory(userId, teamId string, newCategor
			Id:        newCategoryId,
			UserId:    userId,
			TeamId:    teamId,
			Sorting:   model.SidebarCategorySortDefault,
			Sorting:   newCategory.Sorting,
			SortOrder: int64(model.MinimalSidebarSortDistance * len(newOrder)), // first we place it at the end of the list
			Type:      model.SidebarCategoryCustom,
			Muted:     newCategory.Muted,
@ -681,7 +681,7 @@ func (fs SqlFileInfoStore) Search(paramsList []*model.SearchParams, userId, team
	items := []fileInfoWithChannelID{}
	err = fs.GetSearchReplicaX().Select(&items, queryString, args...)
	if err != nil {
		mlog.Warn("Query error searching files.", mlog.Err(err))
		mlog.Warn("Query error searching files.", mlog.String("error", trimInput(err.Error())))
		// Don't return the error to the caller as it is of no use to the user. Instead return an empty set of search results.
	} else {
		for _, item := range items {
@ -2075,7 +2075,7 @@ func (s *SqlPostStore) search(teamId string, userId string, params *model.Search
	var posts []*model.Post

	if err := s.GetSearchReplicaX().Select(&posts, searchQuery, searchQueryArgs...); err != nil {
		mlog.Warn("Query error searching posts.", mlog.Err(err))
		mlog.Warn("Query error searching posts.", mlog.String("error", trimInput(err.Error())))
		// Don't return the error to the caller as it is of no use to the user. Instead return an empty set of search results.
	} else {
		for _, p := range posts {
@ -6,9 +6,12 @@ package sqlstore
import (
	"context"
	"database/sql"
	"errors"
	"net"
	"regexp"
	"strconv"
	"strings"
	"sync/atomic"
	"time"
	"unicode"

@ -66,14 +69,18 @@ type sqlxDBWrapper struct {
	*sqlx.DB
	queryTimeout time.Duration
	trace        bool
	isOnline     *atomic.Bool
}

func newSqlxDBWrapper(db *sqlx.DB, timeout time.Duration, trace bool) *sqlxDBWrapper {
	return &sqlxDBWrapper{
	w := &sqlxDBWrapper{
		DB:           db,
		queryTimeout: timeout,
		trace:        trace,
		isOnline:     &atomic.Bool{},
	}
	w.isOnline.Store(true)
	return w
}

func (w *sqlxDBWrapper) Stats() sql.DBStats {
@ -83,19 +90,19 @@ func (w *sqlxDBWrapper) Stats() sql.DBStats {
func (w *sqlxDBWrapper) Beginx() (*sqlxTxWrapper, error) {
	tx, err := w.DB.Beginx()
	if err != nil {
		return nil, err
		return nil, w.checkErr(err)
	}

	return newSqlxTxWrapper(tx, w.queryTimeout, w.trace), nil
	return newSqlxTxWrapper(tx, w.queryTimeout, w.trace, w), nil
}

func (w *sqlxDBWrapper) BeginXWithIsolation(opts *sql.TxOptions) (*sqlxTxWrapper, error) {
	tx, err := w.DB.BeginTxx(context.Background(), opts)
	if err != nil {
		return nil, err
		return nil, w.checkErr(err)
	}

	return newSqlxTxWrapper(tx, w.queryTimeout, w.trace), nil
	return newSqlxTxWrapper(tx, w.queryTimeout, w.trace, w), nil
}

func (w *sqlxDBWrapper) Get(dest any, query string, args ...any) error {
@ -109,7 +116,7 @@ func (w *sqlxDBWrapper) Get(dest any, query string, args ...any) error {
		}(time.Now())
	}

	return w.DB.GetContext(ctx, dest, query, args...)
	return w.checkErr(w.DB.GetContext(ctx, dest, query, args...))
}

func (w *sqlxDBWrapper) GetBuilder(dest any, builder Builder) error {
@ -134,7 +141,7 @@ func (w *sqlxDBWrapper) NamedExec(query string, arg any) (sql.Result, error) {
		}(time.Now())
	}

	return w.DB.NamedExecContext(ctx, query, arg)
	return w.checkErrWithResult(w.DB.NamedExecContext(ctx, query, arg))
}

func (w *sqlxDBWrapper) Exec(query string, args ...any) (sql.Result, error) {
@ -161,7 +168,7 @@ func (w *sqlxDBWrapper) ExecNoTimeout(query string, args ...any) (sql.Result, er
		}(time.Now())
	}

	return w.DB.ExecContext(context.Background(), query, args...)
	return w.checkErrWithResult(w.DB.ExecContext(context.Background(), query, args...))
}

// ExecRaw is like Exec but without any rebinding of params. You need to pass
@ -176,7 +183,7 @@ func (w *sqlxDBWrapper) ExecRaw(query string, args ...any) (sql.Result, error) {
		}(time.Now())
	}

	return w.DB.ExecContext(ctx, query, args...)
	return w.checkErrWithResult(w.DB.ExecContext(ctx, query, args...))
}

func (w *sqlxDBWrapper) NamedQuery(query string, arg any) (*sqlx.Rows, error) {
@ -192,7 +199,7 @@ func (w *sqlxDBWrapper) NamedQuery(query string, arg any) (*sqlx.Rows, error) {
		}(time.Now())
	}

	return w.DB.NamedQueryContext(ctx, query, arg)
	return w.checkErrWithRows(w.DB.NamedQueryContext(ctx, query, arg))
}

func (w *sqlxDBWrapper) QueryRowX(query string, args ...any) *sqlx.Row {
@ -220,7 +227,7 @@ func (w *sqlxDBWrapper) QueryX(query string, args ...any) (*sqlx.Rows, error) {
		}(time.Now())
	}

	return w.DB.QueryxContext(ctx, query, args)
	return w.checkErrWithRows(w.DB.QueryxContext(ctx, query, args))
}

func (w *sqlxDBWrapper) Select(dest any, query string, args ...any) error {
@ -238,7 +245,7 @@ func (w *sqlxDBWrapper) SelectCtx(ctx context.Context, dest any, query string, a
		}(time.Now())
	}

	return w.DB.SelectContext(ctx, dest, query, args...)
	return w.checkErr(w.DB.SelectContext(ctx, dest, query, args...))
}

func (w *sqlxDBWrapper) SelectBuilder(dest any, builder Builder) error {
@ -254,13 +261,15 @@ type sqlxTxWrapper struct {
	*sqlx.Tx
	queryTimeout time.Duration
	trace        bool
	dbw          *sqlxDBWrapper
}

func newSqlxTxWrapper(tx *sqlx.Tx, timeout time.Duration, trace bool) *sqlxTxWrapper {
func newSqlxTxWrapper(tx *sqlx.Tx, timeout time.Duration, trace bool, dbw *sqlxDBWrapper) *sqlxTxWrapper {
	return &sqlxTxWrapper{
		Tx:           tx,
		queryTimeout: timeout,
		trace:        trace,
		dbw:          dbw,
	}
}

@ -275,7 +284,7 @@ func (w *sqlxTxWrapper) Get(dest any, query string, args ...any) error {
		}(time.Now())
	}

	return w.Tx.GetContext(ctx, dest, query, args...)
	return w.dbw.checkErr(w.Tx.GetContext(ctx, dest, query, args...))
}

func (w *sqlxTxWrapper) GetBuilder(dest any, builder Builder) error {
@ -284,13 +293,13 @@ func (w *sqlxTxWrapper) GetBuilder(dest any, builder Builder) error {
		return err
	}

	return w.Get(dest, query, args...)
	return w.dbw.checkErr(w.Get(dest, query, args...))
}

func (w *sqlxTxWrapper) Exec(query string, args ...any) (sql.Result, error) {
	query = w.Tx.Rebind(query)

	return w.ExecRaw(query, args...)
	return w.dbw.checkErrWithResult(w.ExecRaw(query, args...))
}

func (w *sqlxTxWrapper) ExecNoTimeout(query string, args ...any) (sql.Result, error) {
@ -302,7 +311,7 @@ func (w *sqlxTxWrapper) ExecNoTimeout(query string, args ...any) (sql.Result, er
		}(time.Now())
	}

	return w.Tx.ExecContext(context.Background(), query, args...)
	return w.dbw.checkErrWithResult(w.Tx.ExecContext(context.Background(), query, args...))
}

func (w *sqlxTxWrapper) ExecBuilder(builder Builder) (sql.Result, error) {
@ -326,7 +335,7 @@ func (w *sqlxTxWrapper) ExecRaw(query string, args ...any) (sql.Result, error) {
		}(time.Now())
	}

	return w.Tx.ExecContext(ctx, query, args...)
	return w.dbw.checkErrWithResult(w.Tx.ExecContext(ctx, query, args...))
}

func (w *sqlxTxWrapper) NamedExec(query string, arg any) (sql.Result, error) {
@ -342,7 +351,7 @@ func (w *sqlxTxWrapper) NamedExec(query string, arg any) (sql.Result, error) {
		}(time.Now())
	}

	return w.Tx.NamedExecContext(ctx, query, arg)
	return w.dbw.checkErrWithResult(w.Tx.NamedExecContext(ctx, query, arg))
}

func (w *sqlxTxWrapper) NamedQuery(query string, arg any) (*sqlx.Rows, error) {
@ -386,7 +395,7 @@ func (w *sqlxTxWrapper) NamedQuery(query string, arg any) (*sqlx.Rows, error) {
		}
	}

	return res.rows, res.err
	return res.rows, w.dbw.checkErr(res.err)
}

func (w *sqlxTxWrapper) QueryRowX(query string, args ...any) *sqlx.Row {
@ -414,7 +423,7 @@ func (w *sqlxTxWrapper) QueryX(query string, args ...any) (*sqlx.Rows, error) {
		}(time.Now())
	}

	return w.Tx.QueryxContext(ctx, query, args)
	return w.dbw.checkErrWithRows(w.Tx.QueryxContext(ctx, query, args))
}

func (w *sqlxTxWrapper) Select(dest any, query string, args ...any) error {
@ -428,7 +437,7 @@ func (w *sqlxTxWrapper) Select(dest any, query string, args ...any) error {
		}(time.Now())
	}

	return w.Tx.SelectContext(ctx, dest, query, args...)
	return w.dbw.checkErr(w.Tx.SelectContext(ctx, dest, query, args...))
}

func (w *sqlxTxWrapper) SelectBuilder(dest any, builder Builder) error {
@ -459,3 +468,23 @@ func printArgs(query string, dur time.Duration, args ...any) {
	}
	mlog.Debug(query, fields...)
}

func (w *sqlxDBWrapper) checkErrWithResult(res sql.Result, err error) (sql.Result, error) {
	return res, w.checkErr(err)
}

func (w *sqlxDBWrapper) checkErrWithRows(res *sqlx.Rows, err error) (*sqlx.Rows, error) {
	return res, w.checkErr(err)
}

func (w *sqlxDBWrapper) checkErr(err error) error {
	var netError *net.OpError
	if errors.As(err, &netError) && (!netError.Temporary() && !netError.Timeout()) {
		w.isOnline.Store(false)
	}
	return err
}

func (w *sqlxDBWrapper) Online() bool {
	return w.isOnline.Load()
}
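The failover signal in checkErr above hinges on errors.As unwrapping the chain to a *net.OpError that is neither temporary nor a timeout. A small self-contained illustration of that classification (the error values here are made up):

package main

import (
	"errors"
	"fmt"
	"net"
)

func main() {
	// A wrapped, permanent network error, e.g. a dropped connection.
	opErr := &net.OpError{Op: "read", Net: "tcp", Err: errors.New("connection reset")}
	wrapped := fmt.Errorf("select failed: %w", opErr)

	var ne *net.OpError
	if errors.As(wrapped, &ne) && !ne.Temporary() && !ne.Timeout() {
		// This is exactly the case in which checkErr flips isOnline to false.
		fmt.Println("permanent network failure: mark the replica offline")
	}
}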
@ -6,6 +6,7 @@ package sqlstore
import (
	"context"
	"strings"
	"sync"
	"testing"

	"github.com/stretchr/testify/assert"
@ -31,9 +32,11 @@ func TestSqlX(t *testing.T) {
		rrCounter:   0,
		srCounter:   0,
		settings:    settings,
		quitMonitor: make(chan struct{}),
		wgMonitor:   &sync.WaitGroup{},
	}

	store.initConnection()
	require.NoError(t, store.initConnection())

	defer store.Close()
@ -49,7 +49,7 @@ const (
	MySQLForeignKeyViolationErrorCode = 1452
	PGDuplicateObjectErrorCode        = "42710"
	MySQLDuplicateObjectErrorCode     = 1022
	DBPingAttempts                    = 18
	DBPingAttempts                    = 5
	DBPingTimeoutSecs                 = 10
	// This is a numerical version string by postgres. The format is
	// 2 characters for major, minor, and patch version prior to 10.
@ -123,9 +123,9 @@ type SqlStore struct {

	masterX *sqlxDBWrapper

	ReplicaXs []*sqlxDBWrapper
	ReplicaXs []*atomic.Pointer[sqlxDBWrapper]

	searchReplicaXs []*sqlxDBWrapper
	searchReplicaXs []*atomic.Pointer[sqlxDBWrapper]

	replicaLagHandles []*dbsql.DB
	stores            SqlStoreStores
@ -138,6 +138,9 @@ type SqlStore struct {

	isBinaryParam             bool
	pgDefaultTextSearchConfig string

	quitMonitor chan struct{}
	wgMonitor   *sync.WaitGroup
}

func New(settings model.SqlSettings, metrics einterfaces.MetricsInterface) *SqlStore {
@ -146,9 +149,17 @@ func New(settings model.SqlSettings, metrics einterfaces.MetricsInterface) *SqlS
		srCounter:   0,
		settings:    &settings,
		metrics:     metrics,
		quitMonitor: make(chan struct{}),
		wgMonitor:   &sync.WaitGroup{},
	}

	store.initConnection()
	err := store.initConnection()
	if err != nil {
		mlog.Fatal("Error setting up connections", mlog.Err(err))
	}

	store.wgMonitor.Add(1)
	go store.monitorReplicas()

	ver, err := store.GetDbVersion(true)
	if err != nil {
@ -230,29 +241,28 @@ func New(settings model.SqlSettings, metrics einterfaces.MetricsInterface) *SqlS

// SetupConnection sets up the connection to the database and pings it to make sure it's alive.
// It also applies any database configuration settings that are required.
func SetupConnection(connType string, dataSource string, settings *model.SqlSettings) *dbsql.DB {
func SetupConnection(connType string, dataSource string, settings *model.SqlSettings, attempts int) (*dbsql.DB, error) {
	db, err := dbsql.Open(*settings.DriverName, dataSource)
	if err != nil {
		mlog.Fatal("Failed to open SQL connection to err.", mlog.Err(err))
		return nil, errors.Wrap(err, "failed to open SQL connection")
	}

	for i := 0; i < DBPingAttempts; i++ {
	for i := 0; i < attempts; i++ {
		// At this point, we have passed sql.Open, so we deliberately ignore any errors.
		sanitized, _ := SanitizeDataSource(*settings.DriverName, dataSource)
		mlog.Info("Pinging SQL", mlog.String("database", connType), mlog.String("dataSource", sanitized))
		ctx, cancel := context.WithTimeout(context.Background(), DBPingTimeoutSecs*time.Second)
		defer cancel()
		err = db.PingContext(ctx)
		if err == nil {
			break
		} else {
			if i == DBPingAttempts-1 {
				mlog.Fatal("Failed to ping DB, server will exit.", mlog.Err(err))
			} else {
		if err != nil {
			if i == attempts-1 {
				return nil, err
			}
			mlog.Error("Failed to ping DB", mlog.Err(err), mlog.Int("retrying in seconds", DBPingTimeoutSecs))
			time.Sleep(DBPingTimeoutSecs * time.Second)
			continue
		}
		}
		break
	}

	if strings.HasPrefix(connType, replicaLagPrefix) {
@ -272,7 +282,7 @@ func SetupConnection(connType string, dataSource string, settings *model.SqlSett
	db.SetConnMaxLifetime(time.Duration(*settings.ConnMaxLifetimeMilliseconds) * time.Millisecond)
	db.SetConnMaxIdleTime(time.Duration(*settings.ConnMaxIdleTimeMilliseconds) * time.Millisecond)

	return db
	return db, nil
}
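Two things change in SetupConnection above: failures now surface as returned errors instead of mlog.Fatal calls, and the ping-retry count is caller-controlled. Startup paths pass DBPingAttempts (now 5 rather than 18), while the replica monitor added later in this diff retries with a single attempt per tick.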
func (ss *SqlStore) SetContext(context context.Context) {
@ -285,7 +295,7 @@ func (ss *SqlStore) Context() context.Context {

func noOpMapper(s string) string { return s }

func (ss *SqlStore) initConnection() {
func (ss *SqlStore) initConnection() error {
	dataSource := *ss.settings.DataSource
	if ss.DriverName() == model.DatabaseDriverMysql {
		// TODO: We ignore the readTimeout datasource parameter for MySQL since QueryTimeout
@ -294,11 +304,14 @@ func (ss *SqlStore) initConnection() {
		var err error
		dataSource, err = ResetReadTimeout(dataSource)
		if err != nil {
			mlog.Fatal("Failed to reset read timeout from datasource.", mlog.Err(err), mlog.String("src", dataSource))
			return errors.Wrap(err, "failed to reset read timeout from datasource")
		}
	}

	handle := SetupConnection("master", dataSource, ss.settings)
	handle, err := SetupConnection("master", dataSource, ss.settings, DBPingAttempts)
	if err != nil {
		return err
	}
	ss.masterX = newSqlxDBWrapper(sqlx.NewDb(handle, ss.DriverName()),
		time.Duration(*ss.settings.QueryTimeout)*time.Second,
		*ss.settings.Trace)
@ -310,34 +323,32 @@ func (ss *SqlStore) initConnection() {
	}

	if len(ss.settings.DataSourceReplicas) > 0 {
		ss.ReplicaXs = make([]*sqlxDBWrapper, len(ss.settings.DataSourceReplicas))
		ss.ReplicaXs = make([]*atomic.Pointer[sqlxDBWrapper], len(ss.settings.DataSourceReplicas))
		for i, replica := range ss.settings.DataSourceReplicas {
			handle := SetupConnection(fmt.Sprintf("replica-%v", i), replica, ss.settings)
			ss.ReplicaXs[i] = newSqlxDBWrapper(sqlx.NewDb(handle, ss.DriverName()),
				time.Duration(*ss.settings.QueryTimeout)*time.Second,
				*ss.settings.Trace)
			if ss.DriverName() == model.DatabaseDriverMysql {
				ss.ReplicaXs[i].MapperFunc(noOpMapper)
			}
			if ss.metrics != nil {
				ss.metrics.RegisterDBCollector(ss.ReplicaXs[i].DB.DB, "replica-"+strconv.Itoa(i))
			ss.ReplicaXs[i] = &atomic.Pointer[sqlxDBWrapper]{}
			handle, err = SetupConnection(fmt.Sprintf("replica-%v", i), replica, ss.settings, DBPingAttempts)
			if err != nil {
				// Initializing to be offline
				ss.ReplicaXs[i].Store(&sqlxDBWrapper{isOnline: &atomic.Bool{}})
				mlog.Warn("Failed to setup connection. Skipping..", mlog.String("db", fmt.Sprintf("replica-%v", i)), mlog.Err(err))
				continue
			}
			ss.setDB(ss.ReplicaXs[i], handle, "replica-"+strconv.Itoa(i))
		}
	}

	if len(ss.settings.DataSourceSearchReplicas) > 0 {
		ss.searchReplicaXs = make([]*sqlxDBWrapper, len(ss.settings.DataSourceSearchReplicas))
		ss.searchReplicaXs = make([]*atomic.Pointer[sqlxDBWrapper], len(ss.settings.DataSourceSearchReplicas))
		for i, replica := range ss.settings.DataSourceSearchReplicas {
			handle := SetupConnection(fmt.Sprintf("search-replica-%v", i), replica, ss.settings)
			ss.searchReplicaXs[i] = newSqlxDBWrapper(sqlx.NewDb(handle, ss.DriverName()),
				time.Duration(*ss.settings.QueryTimeout)*time.Second,
				*ss.settings.Trace)
			if ss.DriverName() == model.DatabaseDriverMysql {
				ss.searchReplicaXs[i].MapperFunc(noOpMapper)
			}
			if ss.metrics != nil {
				ss.metrics.RegisterDBCollector(ss.searchReplicaXs[i].DB.DB, "searchreplica-"+strconv.Itoa(i))
			ss.searchReplicaXs[i] = &atomic.Pointer[sqlxDBWrapper]{}
			handle, err = SetupConnection(fmt.Sprintf("search-replica-%v", i), replica, ss.settings, DBPingAttempts)
			if err != nil {
				// Initializing to be offline
				ss.searchReplicaXs[i].Store(&sqlxDBWrapper{isOnline: &atomic.Bool{}})
				mlog.Warn("Failed to setup connection. Skipping..", mlog.String("db", fmt.Sprintf("search-replica-%v", i)), mlog.Err(err))
				continue
			}
			ss.setDB(ss.searchReplicaXs[i], handle, "searchreplica-"+strconv.Itoa(i))
		}
	}

@ -347,9 +358,14 @@ func (ss *SqlStore) initConnection() {
		if src.DataSource == nil {
			continue
		}
		ss.replicaLagHandles[i] = SetupConnection(fmt.Sprintf(replicaLagPrefix+"-%d", i), *src.DataSource, ss.settings)
		ss.replicaLagHandles[i], err = SetupConnection(fmt.Sprintf(replicaLagPrefix+"-%d", i), *src.DataSource, ss.settings, DBPingAttempts)
		if err != nil {
			mlog.Warn("Failed to setup replica lag handle. Skipping..", mlog.String("db", fmt.Sprintf(replicaLagPrefix+"-%d", i)), mlog.Err(err))
			continue
		}
	}
	}
	return nil
}
func (ss *SqlStore) DriverName() string {
@ -455,8 +471,15 @@ func (ss *SqlStore) GetSearchReplicaX() *sqlxDBWrapper {
		return ss.GetReplicaX()
	}

	for i := 0; i < len(ss.searchReplicaXs); i++ {
	rrNum := atomic.AddInt64(&ss.srCounter, 1) % int64(len(ss.searchReplicaXs))
	return ss.searchReplicaXs[rrNum]
		if ss.searchReplicaXs[rrNum].Load().Online() {
			return ss.searchReplicaXs[rrNum].Load()
		}
	}

	// If all search replicas are down, then go with replica.
	return ss.GetReplicaX()
}

func (ss *SqlStore) GetReplicaX() *sqlxDBWrapper {
@ -464,23 +487,64 @@ func (ss *SqlStore) GetReplicaX() *sqlxDBWrapper {
		return ss.GetMasterX()
	}

	for i := 0; i < len(ss.ReplicaXs); i++ {
	rrNum := atomic.AddInt64(&ss.rrCounter, 1) % int64(len(ss.ReplicaXs))
	return ss.ReplicaXs[rrNum]
		if ss.ReplicaXs[rrNum].Load().Online() {
			return ss.ReplicaXs[rrNum].Load()
		}
	}

	// If all replicas are down, then go with master.
	return ss.GetMasterX()
}
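Storing replicas behind atomic.Pointer values is what lets the monitor goroutine below swap in a freshly dialed connection while request goroutines keep calling Load() lock-free in the round-robin selection above. A minimal sketch of that hot-swap idiom (types simplified, not the actual store code):

package main

import (
	"fmt"
	"sync/atomic"
)

type conn struct{ name string }

func main() {
	var replica atomic.Pointer[conn]
	replica.Store(&conn{name: "replica-0 (initial)"})

	// Readers only ever Load(); no mutex is needed.
	fmt.Println(replica.Load().name)

	// A background monitor can atomically swap in a reconnected handle.
	replica.Store(&conn{name: "replica-0 (reconnected)"})
	fmt.Println(replica.Load().name)
}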
func (ss *SqlStore) GetInternalReplicaDBs() []*sql.DB {
	if len(ss.settings.DataSourceReplicas) == 0 || ss.lockedToMaster || !ss.hasLicense() {
		return []*sql.DB{
			ss.GetMasterX().DB.DB,
		}
func (ss *SqlStore) monitorReplicas() {
	t := time.NewTicker(time.Duration(*ss.settings.ReplicaMonitorIntervalSeconds) * time.Second)
	defer func() {
		t.Stop()
		ss.wgMonitor.Done()
	}()
	for {
		select {
		case <-ss.quitMonitor:
			return
		case <-t.C:
			setupReplica := func(r *atomic.Pointer[sqlxDBWrapper], dsn, name string) {
				if r.Load().Online() {
					return
				}

	dbs := make([]*sql.DB, len(ss.ReplicaXs))
	for i, rx := range ss.ReplicaXs {
		dbs[i] = rx.DB.DB
				handle, err := SetupConnection(name, dsn, ss.settings, 1)
				if err != nil {
					mlog.Warn("Failed to setup connection. Skipping..", mlog.String("db", name), mlog.Err(err))
					return
				}
				if ss.metrics != nil && r.Load() != nil && r.Load().DB != nil {
					ss.metrics.UnregisterDBCollector(r.Load().DB.DB, name)
				}
				ss.setDB(r, handle, name)
			}
			for i, replica := range ss.ReplicaXs {
				setupReplica(replica, ss.settings.DataSourceReplicas[i], "replica-"+strconv.Itoa(i))
			}

	return dbs
			for i, replica := range ss.searchReplicaXs {
				setupReplica(replica, ss.settings.DataSourceSearchReplicas[i], "search-replica-"+strconv.Itoa(i))
			}
		}
	}
}

func (ss *SqlStore) setDB(replica *atomic.Pointer[sqlxDBWrapper], handle *dbsql.DB, name string) {
	replica.Store(newSqlxDBWrapper(sqlx.NewDb(handle, ss.DriverName()),
		time.Duration(*ss.settings.QueryTimeout)*time.Second,
		*ss.settings.Trace))
	if ss.DriverName() == model.DatabaseDriverMysql {
		replica.Load().MapperFunc(noOpMapper)
	}
	if ss.metrics != nil {
		ss.metrics.RegisterDBCollector(replica.Load().DB.DB, name)
	}
}
func (ss *SqlStore) GetInternalReplicaDB() *sql.DB {
@ -489,7 +553,7 @@ func (ss *SqlStore) GetInternalReplicaDB() *sql.DB {
	}

	rrNum := atomic.AddInt64(&ss.rrCounter, 1) % int64(len(ss.ReplicaXs))
	return ss.ReplicaXs[rrNum].DB.DB
	return ss.ReplicaXs[rrNum].Load().DB.DB
}

func (ss *SqlStore) TotalMasterDbConnections() int {
@ -541,7 +605,10 @@ func (ss *SqlStore) TotalReadDbConnections() int {

	count := 0
	for _, db := range ss.ReplicaXs {
		count = count + db.Stats().OpenConnections
		if !db.Load().Online() {
			continue
		}
		count = count + db.Load().Stats().OpenConnections
	}

	return count
@ -554,7 +621,10 @@ func (ss *SqlStore) TotalSearchDbConnections() int {

	count := 0
	for _, db := range ss.searchReplicaXs {
		count = count + db.Stats().OpenConnections
		if !db.Load().Online() {
			continue
		}
		count = count + db.Load().Stats().OpenConnections
	}

	return count
@ -782,9 +852,14 @@ func IsUniqueConstraintError(err error, indexName []string) bool {
}

func (ss *SqlStore) GetAllConns() []*sqlxDBWrapper {
	all := make([]*sqlxDBWrapper, len(ss.ReplicaXs)+1)
	copy(all, ss.ReplicaXs)
	all[len(ss.ReplicaXs)] = ss.masterX
	all := make([]*sqlxDBWrapper, 0, len(ss.ReplicaXs)+1)
	for i := range ss.ReplicaXs {
		if !ss.ReplicaXs[i].Load().Online() {
			continue
		}
		all = append(all, ss.ReplicaXs[i].Load())
	}
	all = append(all, ss.masterX)
	return all
}

@ -807,11 +882,24 @@ func (ss *SqlStore) RecycleDBConnections(d time.Duration) {

func (ss *SqlStore) Close() {
	ss.masterX.Close()
	// Closing monitor and waiting for it to be done.
	// This needs to be done before closing the replica handles.
	close(ss.quitMonitor)
	ss.wgMonitor.Wait()

	for _, replica := range ss.ReplicaXs {
		replica.Close()
		if replica.Load().Online() {
			replica.Load().Close()
		}
	}

	for _, replica := range ss.searchReplicaXs {
		if replica.Load().Online() {
			replica.Load().Close()
		}
	}

	for _, replica := range ss.replicaLagHandles {
		replica.Close()
	}
}
@ -1132,7 +1220,10 @@ func (ss *SqlStore) migrate(direction migrationDirection) error {
	if err != nil {
		return err
	}
	db := SetupConnection("master", dataSource, ss.settings)
	db, err2 := SetupConnection("master", dataSource, ss.settings, DBPingAttempts)
	if err2 != nil {
		return err2
	}
	driver, err = ms.WithInstance(db)
	defer db.Close()
case model.DatabaseDriverPostgres:
@ -765,9 +765,11 @@ func TestReplicaLagQuery(t *testing.T) {
		srCounter:   0,
		settings:    settings,
		metrics:     mockMetrics,
		quitMonitor: make(chan struct{}),
		wgMonitor:   &sync.WaitGroup{},
	}

	store.initConnection()
	require.NoError(t, store.initConnection())
	store.stores.post = newSqlPostStore(store, mockMetrics)
	err = store.migrate(migrationsDirectionUp)
	require.NoError(t, err)
@ -840,8 +842,10 @@ func TestMySQLReadTimeout(t *testing.T) {

	store := &SqlStore{
		settings:    settings,
		quitMonitor: make(chan struct{}),
		wgMonitor:   &sync.WaitGroup{},
	}
	store.initConnection()
	require.NoError(t, store.initConnection())
	defer store.Close()

	_, err = store.GetMasterX().ExecNoTimeout(`SELECT SLEEP(3)`)
@ -688,6 +688,28 @@ func (s *SqlThreadStore) UpdateMembership(membership *model.ThreadMembership) (*
	return s.updateMembership(s.GetMasterX(), membership)
}

func (s *SqlThreadStore) DeleteMembershipsForChannel(userID, channelID string) error {
	subQuery := s.getSubQueryBuilder().
		Select("1").
		From("Threads").
		Where(sq.And{
			sq.Expr("Threads.PostId = ThreadMemberships.PostId"),
			sq.Eq{"Threads.ChannelId": channelID},
		})

	query := s.getQueryBuilder().
		Delete("ThreadMemberships").
		Where(sq.Eq{"UserId": userID}).
		Where(sq.Expr("EXISTS (?)", subQuery))

	_, err := s.GetMasterX().ExecBuilder(query)
	if err != nil {
		return errors.Wrapf(err, "failed to remove thread memberships with userid=%s channelid=%s", userID, channelID)
	}

	return nil
}

func (s *SqlThreadStore) updateMembership(ex sqlxExecutor, membership *model.ThreadMembership) (*model.ThreadMembership, error) {
	query := s.getQueryBuilder().
		Update("ThreadMemberships").
@ -712,7 +734,14 @@ func (s *SqlThreadStore) GetMembershipsForUser(userId, teamId string) ([]*model.
	memberships := []*model.ThreadMembership{}

	query := s.getQueryBuilder().
		Select("ThreadMemberships.*").
		Select(
			"ThreadMemberships.PostId",
			"ThreadMemberships.UserId",
			"ThreadMemberships.Following",
			"ThreadMemberships.LastUpdated",
			"ThreadMemberships.LastViewed",
			"ThreadMemberships.UnreadMentions",
		).
		Join("Threads ON Threads.PostId = ThreadMemberships.PostId").
		From("ThreadMemberships").
		Where(sq.Or{sq.Eq{"Threads.ThreadTeamId": teamId}, sq.Eq{"Threads.ThreadTeamId": ""}}).
@ -732,7 +761,14 @@ func (s *SqlThreadStore) GetMembershipForUser(userId, postId string) (*model.Thr
func (s *SqlThreadStore) getMembershipForUser(ex sqlxExecutor, userId, postId string) (*model.ThreadMembership, error) {
	var membership model.ThreadMembership
	query := s.getQueryBuilder().
		Select("*").
		Select(
			"PostId",
			"UserId",
			"Following",
			"LastUpdated",
			"LastViewed",
			"UnreadMentions",
		).
		From("ThreadMemberships").
		Where(sq.And{
			sq.Eq{"PostId": postId},
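Replacing Select("ThreadMemberships.*") and Select("*") with explicit column lists keeps these queries stable if the table later grows new columns, which would otherwise change the result shape and could break struct scanning.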
@ -233,3 +233,14 @@ func SanitizeDataSource(driverName, dataSource string) (string, error) {
		return "", errors.New("invalid drivername. Not postgres or mysql.")
	}
}

const maxTokenSize = 50

// trimInput limits the string to a max size to prevent clogging up disk space
// while logging
func trimInput(input string) string {
	if len(input) > maxTokenSize {
		input = input[:maxTokenSize] + "..."
	}
	return input
}
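trimInput is fully defined above, so its behavior can be shown directly:

package main

import (
	"fmt"
	"strings"
)

const maxTokenSize = 50

// trimInput, as added in the diff above.
func trimInput(input string) string {
	if len(input) > maxTokenSize {
		input = input[:maxTokenSize] + "..."
	}
	return input
}

func main() {
	fmt.Println(trimInput("short term"))            // unchanged
	fmt.Println(trimInput(strings.Repeat("a", 60))) // first 50 chars plus "..."
}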
@ -72,10 +72,7 @@ type Store interface {
|
||||
// GetInternalMasterDB allows access to the raw master DB
|
||||
// handle for the multi-product architecture.
|
||||
GetInternalMasterDB() *sql.DB
|
||||
// GetInternalReplicaDBs allows access to the raw replica DB
|
||||
// handles for the multi-product architecture.
|
||||
GetInternalReplicaDB() *sql.DB
|
||||
GetInternalReplicaDBs() []*sql.DB
|
||||
TotalMasterDbConnections() int
|
||||
TotalReadDbConnections() int
|
||||
TotalSearchDbConnections() int
|
||||
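A hedged sketch of how a caller might use the new plural accessor to inspect every replica handle (the Stats usage is illustrative; database/sql exposes pool statistics on each raw *sql.DB):

for i, replica := range store.GetInternalReplicaDBs() {
	stats := replica.Stats()
	fmt.Printf("replica %d: %d open connections\n", i, stats.OpenConnections)
}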
@@ -347,6 +344,7 @@ type ThreadStore interface {
	PermanentDeleteBatchThreadMembershipsForRetentionPolicies(now, globalPolicyEndTime, limit int64, cursor model.RetentionPolicyCursor) (int64, model.RetentionPolicyCursor, error)
	DeleteOrphanedRows(limit int) (deleted int64, err error)
	GetThreadUnreadReplyCount(threadMembership *model.ThreadMembership) (int64, error)
	DeleteMembershipsForChannel(userID, channelID string) error

	// Insights - threads
	GetTopThreadsForTeamSince(teamID string, userID string, since int64, offset int, limit int) (*model.TopThreadList, error)
@@ -115,7 +115,7 @@ func TestChannelStore(t *testing.T, ss store.Store, s SqlStore) {
	t.Run("GetGuestCount", func(t *testing.T) { testGetGuestCount(t, ss) })
	t.Run("SearchMore", func(t *testing.T) { testChannelStoreSearchMore(t, ss) })
	t.Run("SearchInTeam", func(t *testing.T) { testChannelStoreSearchInTeam(t, ss) })
	t.Run("Autocomplete", func(t *testing.T) { testAutocomplete(t, ss) })
	t.Run("Autocomplete", func(t *testing.T) { testAutocomplete(t, ss, s) })
	t.Run("SearchArchivedInTeam", func(t *testing.T) { testChannelStoreSearchArchivedInTeam(t, ss, s) })
	t.Run("SearchForUserInTeam", func(t *testing.T) { testChannelStoreSearchForUserInTeam(t, ss) })
	t.Run("SearchAllChannels", func(t *testing.T) { testChannelStoreSearchAllChannels(t, ss) })
@@ -5986,7 +5986,7 @@ func testChannelStoreSearchInTeam(t *testing.T, ss store.Store) {
	}
}

func testAutocomplete(t *testing.T, ss store.Store) {
func testAutocomplete(t *testing.T, ss store.Store, s SqlStore) {
	t1 := &model.Team{
		DisplayName: "t1",
		Name:        NewTestId(),
@@ -6165,9 +6165,9 @@ func testAutocomplete(t *testing.T, ss store.Store) {
	}

	for _, testCase := range testCases {
		t.Run("Autocomplete/"+testCase.Description, func(t *testing.T) {
			channels, err := ss.Channel().Autocomplete(testCase.UserID, testCase.Term, testCase.IncludeDeleted, testCase.IsGuest)
			require.NoError(t, err)
		t.Run(testCase.Description, func(t *testing.T) {
			channels, err2 := ss.Channel().Autocomplete(testCase.UserID, testCase.Term, testCase.IncludeDeleted, testCase.IsGuest)
			require.NoError(t, err2)
			var gotChannelIds []string
			var gotTeamNames []string
			for _, ch := range channels {
@@ -6178,6 +6178,24 @@ func testAutocomplete(t *testing.T, ss store.Store) {
			require.ElementsMatch(t, testCase.ExpectedTeamNames, gotTeamNames, "team names are not as expected")
		})
	}

	t.Run("Limit", func(t *testing.T) {
		for i := 0; i < model.ChannelSearchDefaultLimit+10; i++ {
			_, err = ss.Channel().Save(&model.Channel{
				TeamId:      teamID,
				DisplayName: "Channel " + strconv.Itoa(i),
				Name:        NewTestId(),
				Type:        model.ChannelTypeOpen,
			}, -1)
			require.NoError(t, err)
		}
		channels, err := ss.Channel().Autocomplete(m1.UserId, "Chann", false, false)
		require.NoError(t, err)
		assert.Len(t, channels, model.ChannelSearchDefaultLimit)
	})

	// Manually truncate Channels table until testlib can handle cleanups
	s.GetMasterX().Exec("TRUNCATE Channels")
}

func testChannelStoreSearchForUserInTeam(t *testing.T, ss store.Store) {
@@ -672,6 +672,38 @@ func testCreateSidebarCategory(t *testing.T, ss store.Store) {
		require.NoError(t, err)
		assert.Equal(t, []string{}, res2.Channels)
	})

	t.Run("should store the correct sorting value", func(t *testing.T) {
		userId := model.NewId()

		team := setupTeam(t, ss, userId)

		opts := &store.SidebarCategorySearchOpts{
			TeamID:      team.Id,
			ExcludeTeam: false,
		}
		res, nErr := ss.Channel().CreateInitialSidebarCategories(userId, opts)
		require.NoError(t, nErr)
		require.NotEmpty(t, res)
		// Create the category
		created, err := ss.Channel().CreateSidebarCategory(userId, team.Id, &model.SidebarCategoryWithChannels{
			SidebarCategory: model.SidebarCategory{
				DisplayName: model.NewId(),
				Sorting:     model.SidebarCategorySortManual,
			},
		})
		require.NoError(t, err)

		// Confirm that sorting value is correct
		res, err = ss.Channel().GetSidebarCategoriesForTeamForUser(userId, team.Id)
		require.NoError(t, err)
		require.Len(t, res.Categories, 4)
		// first category will be favorites and second will be newly created
		assert.Equal(t, model.SidebarCategoryCustom, res.Categories[1].Type)
		assert.Equal(t, created.Id, res.Categories[1].Id)
		assert.Equal(t, model.SidebarCategorySortManual, res.Categories[1].Sorting)
		assert.Equal(t, model.SidebarCategorySortManual, created.Sorting)
	})
}

func testGetSidebarCategory(t *testing.T, ss store.Store, s SqlStore) {
@@ -346,22 +346,6 @@ func (_m *Store) GetInternalReplicaDB() *sql.DB {
	return r0
}

// GetInternalReplicaDBs provides a mock function with given fields:
func (_m *Store) GetInternalReplicaDBs() []*sql.DB {
	ret := _m.Called()

	var r0 []*sql.DB
	if rf, ok := ret.Get(0).(func() []*sql.DB); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).([]*sql.DB)
		}
	}

	return r0
}

// Group provides a mock function with given fields:
func (_m *Store) Group() store.GroupStore {
	ret := _m.Called()
@@ -29,6 +29,20 @@ func (_m *ThreadStore) DeleteMembershipForUser(userId string, postID string) err
	return r0
}

// DeleteMembershipsForChannel provides a mock function with given fields: userID, channelID
func (_m *ThreadStore) DeleteMembershipsForChannel(userID string, channelID string) error {
	ret := _m.Called(userID, channelID)

	var r0 error
	if rf, ok := ret.Get(0).(func(string, string) error); ok {
		r0 = rf(userID, channelID)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// DeleteOrphanedRows provides a mock function with given fields: limit
func (_m *ThreadStore) DeleteOrphanedRows(limit int) (int64, error) {
	ret := _m.Called(limit)
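A minimal sketch of exercising the generated mock in a unit test (assumes the usual mockery/testify setup; the package alias mocks is illustrative):

threadStore := &mocks.ThreadStore{}
threadStore.On("DeleteMembershipsForChannel", "user-id", "channel-id").Return(nil)

err := threadStore.DeleteMembershipsForChannel("user-id", "channel-id")
require.NoError(t, err)
threadStore.AssertExpectations(t)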
@@ -261,6 +261,7 @@ func MakeSqlSettings(driver string, withReplica bool) *model.SqlSettings {
	}

	log("Created temporary " + driver + " database " + dbName)
	settings.ReplicaMonitorIntervalSeconds = model.NewInt(5)

	return settings
}
@@ -29,6 +29,7 @@ func TestThreadStore(t *testing.T, ss store.Store, s SqlStore) {
	t.Run("MarkAllAsReadByChannels", func(t *testing.T) { testMarkAllAsReadByChannels(t, ss) })
	t.Run("GetTopThreads", func(t *testing.T) { testGetTopThreads(t, ss) })
	t.Run("MarkAllAsReadByTeam", func(t *testing.T) { testMarkAllAsReadByTeam(t, ss) })
	t.Run("DeleteMembershipsForChannel", func(t *testing.T) { testDeleteMembershipsForChannel(t, ss) })
}

func testThreadStorePopulation(t *testing.T, ss store.Store) {
@@ -1914,3 +1915,121 @@ func testMarkAllAsReadByTeam(t *testing.T, ss store.Store) {
		assertThreadReplyCount(t, userBID, team2.Id, 1, "expected 1 unread message in team2 for userB")
	})
}

func testDeleteMembershipsForChannel(t *testing.T, ss store.Store) {
	createThreadMembership := func(userID, postID string) (*model.ThreadMembership, func()) {
		t.Helper()
		opts := store.ThreadMembershipOpts{
			Following:             true,
			IncrementMentions:     false,
			UpdateFollowing:       true,
			UpdateViewedTimestamp: false,
			UpdateParticipants:    false,
		}
		mem, err := ss.Thread().MaintainMembership(userID, postID, opts)
		require.NoError(t, err)

		return mem, func() {
			err := ss.Thread().DeleteMembershipForUser(userID, postID)
			require.NoError(t, err)
		}
	}

	postingUserID := model.NewId()
	userAID := model.NewId()
	userBID := model.NewId()

	team, err := ss.Team().Save(&model.Team{
		DisplayName: "DisplayName",
		Name:        "team" + model.NewId(),
		Email:       MakeEmail(),
		Type:        model.TeamOpen,
	})
	require.NoError(t, err)

	channel1, err := ss.Channel().Save(&model.Channel{
		TeamId:      team.Id,
		DisplayName: "DisplayName",
		Name:        "channel1" + model.NewId(),
		Type:        model.ChannelTypeOpen,
	}, -1)
	require.NoError(t, err)
	channel2, err := ss.Channel().Save(&model.Channel{
		TeamId:      team.Id,
		DisplayName: "DisplayName2",
		Name:        "channel2" + model.NewId(),
		Type:        model.ChannelTypeOpen,
	}, -1)
	require.NoError(t, err)

	rootPost1, err := ss.Post().Save(&model.Post{
		ChannelId: channel1.Id,
		UserId:    postingUserID,
		Message:   model.NewRandomString(10),
	})
	require.NoError(t, err)

	_, err = ss.Post().Save(&model.Post{
		ChannelId: channel1.Id,
		UserId:    postingUserID,
		Message:   model.NewRandomString(10),
		RootId:    rootPost1.Id,
	})
	require.NoError(t, err)

	rootPost2, err := ss.Post().Save(&model.Post{
		ChannelId: channel2.Id,
		UserId:    postingUserID,
		Message:   model.NewRandomString(10),
	})
	require.NoError(t, err)
	_, err = ss.Post().Save(&model.Post{
		ChannelId: channel2.Id,
		UserId:    postingUserID,
		Message:   model.NewRandomString(10),
		RootId:    rootPost2.Id,
	})
	require.NoError(t, err)

	t.Run("should return memberships for user", func(t *testing.T) {
		memA1, cleanupA1 := createThreadMembership(userAID, rootPost1.Id)
		defer cleanupA1()
		memA2, cleanupA2 := createThreadMembership(userAID, rootPost2.Id)
		defer cleanupA2()

		membershipsA, err := ss.Thread().GetMembershipsForUser(userAID, team.Id)
		require.NoError(t, err)

		require.Len(t, membershipsA, 2)
		require.ElementsMatch(t, []*model.ThreadMembership{memA1, memA2}, membershipsA)
	})

	t.Run("should delete memberships for user for channel", func(t *testing.T) {
		_, cleanupA1 := createThreadMembership(userAID, rootPost1.Id)
		defer cleanupA1()
		memA2, cleanupA2 := createThreadMembership(userAID, rootPost2.Id)
		defer cleanupA2()

		err := ss.Thread().DeleteMembershipsForChannel(userAID, channel1.Id)
		require.NoError(t, err)
		membershipsA, err := ss.Thread().GetMembershipsForUser(userAID, team.Id)
		require.NoError(t, err)

		require.Len(t, membershipsA, 1)
		require.ElementsMatch(t, []*model.ThreadMembership{memA2}, membershipsA)
	})

	t.Run("deleting memberships for channel for userA should not affect userB", func(t *testing.T) {
		_, cleanupA1 := createThreadMembership(userAID, rootPost1.Id)
		defer cleanupA1()
		_, cleanupA2 := createThreadMembership(userAID, rootPost2.Id)
		defer cleanupA2()
		memB1, cleanupB2 := createThreadMembership(userBID, rootPost1.Id)
		defer cleanupB2()

		membershipsB, err := ss.Thread().GetMembershipsForUser(userBID, team.Id)
		require.NoError(t, err)

		require.Len(t, membershipsB, 1)
		require.ElementsMatch(t, []*model.ThreadMembership{memB1}, membershipsB)
	})
}
@@ -9112,6 +9112,22 @@ func (s *TimerLayerThreadStore) DeleteMembershipForUser(userId string, postID st
	return err
}

func (s *TimerLayerThreadStore) DeleteMembershipsForChannel(userID string, channelID string) error {
	start := time.Now()

	err := s.ThreadStore.DeleteMembershipsForChannel(userID, channelID)

	elapsed := float64(time.Since(start)) / float64(time.Second)
	if s.Root.Metrics != nil {
		success := "false"
		if err == nil {
			success = "true"
		}
		s.Root.Metrics.ObserveStoreMethodDuration("ThreadStore.DeleteMembershipsForChannel", success, elapsed)
	}
	return err
}

func (s *TimerLayerThreadStore) DeleteOrphanedRows(limit int) (int64, error) {
	start := time.Now()

@@ -58,6 +58,11 @@ func NewMainHelperWithOptions(options *HelperOptions) *MainHelper {
		os.Unsetenv("MM_SQLSETTINGS_DATASOURCE")
	}

	// Unset environment variables commonly set for development that interfere with tests.
	os.Unsetenv("MM_SERVICESETTINGS_SITEURL")
	os.Unsetenv("MM_SERVICESETTINGS_LISTENADDRESS")
	os.Unsetenv("MM_SERVICESETTINGS_ENABLEDEVELOPER")

	var mainHelper MainHelper
	flag.Parse()

@@ -331,7 +336,7 @@ func (h *MainHelper) SetReplicationLagForTesting(seconds int) error {

func (h *MainHelper) execOnEachReplica(query string, args ...any) error {
	for _, replica := range h.SQLStore.ReplicaXs {
		_, err := replica.Exec(query, args...)
		_, err := replica.Load().Exec(query, args...)
		if err != nil {
			return err
		}
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -210,7 +210,6 @@ func GetSanitizedClientLicense(l map[string]string) map[string]string {
	delete(sanitizedLicense, "StartsAt")
	delete(sanitizedLicense, "ExpiresAt")
	delete(sanitizedLicense, "SkuName")
	delete(sanitizedLicense, "SkuShortName")

	return sanitizedLicense
}
@@ -1777,6 +1777,10 @@
    "id": "api.error_get_first_admin_visit_marketplace_status",
    "translation": "Error trying to retrieve the first admin visit marketplace status from the store."
  },
  {
    "id": "api.error_no_organization_name_provided_for_self_hosted_onboarding",
    "translation": "Error no organization name provided for self hosted onboarding."
  },
  {
    "id": "api.error_set_first_admin_complete_setup",
    "translation": "Error trying to save first admin complete setup in the store."
@@ -179,6 +179,7 @@ type Subscription struct {
	DelinquentSince         *int64 `json:"delinquent_since"`
	OriginallyLicensedSeats int    `json:"originally_licensed_seats"`
	ComplianceBlocked       string `json:"compliance_blocked"`
	BillingType             string `json:"billing_type"`
}

// Subscription History model represents a true-up event in a yearly subscription
@@ -390,7 +390,6 @@ type ServiceSettings struct {
	EnableCustomGroups  *bool `access:"site_users_and_teams"`
	SelfHostedPurchase  *bool `access:"write_restrictable,cloud_restrictable"`
	AllowSyncedDrafts   *bool `access:"site_posts"`
	SelfHostedExpansion *bool `access:"write_restrictable,cloud_restrictable"`
}

func (s *ServiceSettings) SetDefaults(isUpdate bool) {
@@ -863,10 +862,6 @@ func (s *ServiceSettings) SetDefaults(isUpdate bool) {
	if s.SelfHostedPurchase == nil {
		s.SelfHostedPurchase = NewBool(true)
	}

	if s.SelfHostedExpansion == nil {
		s.SelfHostedExpansion = NewBool(false)
	}
}

type ClusterSettings struct {
@@ -1173,6 +1168,7 @@ type SqlSettings struct {
	DisableDatabaseSearch             *bool                 `access:"environment_database,write_restrictable,cloud_restrictable"`
	MigrationsStatementTimeoutSeconds *int                  `access:"environment_database,write_restrictable,cloud_restrictable"`
	ReplicaLagSettings                []*ReplicaLagSettings `access:"environment_database,write_restrictable,cloud_restrictable"` // telemetry: none
	ReplicaMonitorIntervalSeconds     *int                  `access:"environment_database,write_restrictable,cloud_restrictable"`
}

func (s *SqlSettings) SetDefaults(isUpdate bool) {
@@ -1237,6 +1233,10 @@ func (s *SqlSettings) SetDefaults(isUpdate bool) {
	if s.ReplicaLagSettings == nil {
		s.ReplicaLagSettings = []*ReplicaLagSettings{}
	}

	if s.ReplicaMonitorIntervalSeconds == nil {
		s.ReplicaMonitorIntervalSeconds = NewInt(5)
	}
}

type LogSettings struct {
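In config.json the new knob would surface roughly as follows (a sketch; sibling SqlSettings keys are elided):

"SqlSettings": {
    "ReplicaMonitorIntervalSeconds": 5
}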
@@ -36,7 +36,8 @@ type SelfHostedCustomerForm struct {

type SelfHostedConfirmPaymentMethodRequest struct {
	StripeSetupIntentID string                      `json:"stripe_setup_intent_id"`
	Subscription        CreateSubscriptionRequest   `json:"subscription"`
	Subscription        *CreateSubscriptionRequest  `json:"subscription"`
	ExpandRequest       *SelfHostedExpansionRequest `json:"expand_request"`
}

// SelfHostedSignupPaymentResponse contains fields needed for self hosted signup to confirm payment and receive license.
@@ -65,3 +66,8 @@ type SelfHostedBillingAccessRequest struct {
type SelfHostedBillingAccessResponse struct {
	Token string `json:"token"`
}

type SelfHostedExpansionRequest struct {
	Seats     int    `json:"seats"`
	LicenseId string `json:"license_id"`
}
@@ -10,6 +10,7 @@ import (

// CompleteOnboardingRequest describes parameters of the requested plugin.
type CompleteOnboardingRequest struct {
	Organization   string   `json:"organization"`    // Organization is the name of the organization
	InstallPlugins []string `json:"install_plugins"` // InstallPlugins is a list of plugins to be installed
}

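A hypothetical request payload matching the struct tags above (values are illustrative only):

{
    "organization": "Acme Corp",
    "install_plugins": ["playbooks"]
}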
@@ -21,10 +21,6 @@ type Permission struct {

var PermissionInviteUser *Permission
var PermissionAddUserToTeam *Permission

// Deprecated: PermissionCreatePost should be used to determine if a slash command can be executed.
// TODO: Remove in 8.0: https://mattermost.atlassian.net/browse/MM-51274
var PermissionUseSlashCommands *Permission
var PermissionManageSlashCommands *Permission
var PermissionManageOthersSlashCommands *Permission
var PermissionCreatePublicChannel *Permission
@@ -393,12 +389,6 @@ func initializePermissions() {
		"authentication.permissions.add_user_to_team.description",
		PermissionScopeTeam,
	}
	PermissionUseSlashCommands = &Permission{
		"use_slash_commands",
		"authentication.permissions.team_use_slash_commands.name",
		"authentication.permissions.team_use_slash_commands.description",
		PermissionScopeChannel,
	}
	PermissionManageSlashCommands = &Permission{
		"manage_slash_commands",
		"authentication.permissions.manage_slash_commands.name",
@@ -2318,7 +2308,6 @@ func initializePermissions() {
	}

	ChannelScopedPermissions := []*Permission{
		PermissionUseSlashCommands,
		PermissionManagePublicChannelMembers,
		PermissionManagePrivateChannelMembers,
		PermissionManageChannelRoles,
@@ -755,7 +755,6 @@ func MakeDefaultRoles() map[string]*Role {
			PermissionEditPost.Id,
			PermissionCreatePost.Id,
			PermissionUseChannelMentions.Id,
			PermissionUseSlashCommands.Id,
		},
		SchemeManaged: true,
		BuiltIn:       true,
@@ -774,7 +773,6 @@ func MakeDefaultRoles() map[string]*Role {
			PermissionGetPublicLink.Id,
			PermissionCreatePost.Id,
			PermissionUseChannelMentions.Id,
			PermissionUseSlashCommands.Id,
			PermissionManagePublicChannelProperties.Id,
			PermissionDeletePublicChannel.Id,
			PermissionManagePrivateChannelProperties.Id,
@@ -71,7 +71,6 @@ func TestRolePatchFromChannelModerationsPatch(t *testing.T) {
		PermissionManagePublicChannelMembers.Id,
		PermissionUploadFile.Id,
		PermissionGetPublicLink.Id,
		PermissionUseSlashCommands.Id,
	}

	baseModeratedPermissions := []string{
@@ -16,6 +16,7 @@ const (
	SystemAsymmetricSigningKeyKey    = "AsymmetricSigningKey"
	SystemPostActionCookieSecretKey  = "PostActionCookieSecret"
	SystemInstallationDateKey        = "InstallationDate"
	SystemOrganizationName           = "OrganizationName"
	SystemFirstServerRunTimestampKey = "FirstServerRunTimestamp"
	SystemClusterEncryptionKey       = "ClusterEncryptionKey"
	SystemUpgradedFromTeId           = "UpgradedFromTE"
@@ -251,6 +251,8 @@ type AppError struct {
	wrapped error
}

const maxErrorLength = 1024

func (er *AppError) Error() string {
	var sb strings.Builder

@@ -276,7 +278,11 @@ func (er *AppError) Error() string {
		sb.WriteString(err.Error())
	}

	return sb.String()
	res := sb.String()
	if len(res) > maxErrorLength {
		res = res[:maxErrorLength] + "..."
	}
	return res
}

func (er *AppError) Translate(T i18n.TranslateFunc) {
@@ -116,6 +116,13 @@ func TestAppErrorRender(t *testing.T) {
		aerr := NewAppError("here", "message", nil, "details", http.StatusTeapot).Wrap(fmt.Errorf("my error (%w)", fmt.Errorf("inner error")))
		assert.EqualError(t, aerr, "here: message, details, my error (inner error)")
	})

	t.Run("MaxLength", func(t *testing.T) {
		str := strings.Repeat("error", 65536)
		msg := "msg"
		aerr := NewAppError("id", msg, nil, str, http.StatusTeapot).Wrap(errors.New(str))
		assert.Len(t, aerr.Error(), maxErrorLength+len(msg))
	})
}

func TestAppErrorSerialize(t *testing.T) {
@@ -70,6 +70,7 @@ type WorkTemplatePlaybook struct {

type WorkTemplateIntegration struct {
	ID          string `json:"id"`
	Recommended bool   `json:"recommended"`
}

type WorkTemplateContent struct {
@@ -476,7 +476,6 @@ func (ts *TelemetryService) trackConfig() {
		"post_priority":         *cfg.ServiceSettings.PostPriority,
		"self_hosted_purchase":  *cfg.ServiceSettings.SelfHostedPurchase,
		"allow_synced_drafts":   *cfg.ServiceSettings.AllowSyncedDrafts,
		"self_hosted_expansion": *cfg.ServiceSettings.SelfHostedExpansion,
	})

	ts.SendTelemetry(TrackConfigTeam, map[string]any{
@@ -522,6 +521,7 @@ func (ts *TelemetryService) trackConfig() {
		"query_timeout":                        *cfg.SqlSettings.QueryTimeout,
		"disable_database_search":              *cfg.SqlSettings.DisableDatabaseSearch,
		"migrations_statement_timeout_seconds": *cfg.SqlSettings.MigrationsStatementTimeoutSeconds,
		"replica_monitor_interval_seconds":     *cfg.SqlSettings.ReplicaMonitorIntervalSeconds,
	})

	ts.SendTelemetry(TrackConfigLog, map[string]any{
@@ -628,7 +628,7 @@ func MergeInlineText(inlines []Inline) []Inline {
}

func Unescape(markdown string) string {
	ret := ""
	var ret strings.Builder

	position := 0
	for position < len(markdown) {
@@ -637,27 +637,27 @@ func Unescape(markdown string) string {
		switch c {
		case '\\':
			if position+1 < len(markdown) && isEscapableByte(markdown[position+1]) {
				ret += string(markdown[position+1])
				ret.WriteByte(markdown[position+1])
				position += 2
			} else {
				ret += `\`
				ret.WriteString(`\`)
				position++
			}
		case '&':
			position++
			if semicolon := strings.IndexByte(markdown[position:], ';'); semicolon == -1 {
				ret += "&"
				ret.WriteString("&")
			} else if s := CharacterReference(markdown[position : position+semicolon]); s != "" {
				position += semicolon + 1
				ret += s
				ret.WriteString(s)
			} else {
				ret += "&"
				ret.WriteString("&")
			}
		default:
			ret += string(c)
			ret.WriteRune(c)
			position += cSize
		}
	}

	return ret
	return ret.String()
}
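The switch to strings.Builder avoids re-allocating and copying the accumulated result on every append, which string concatenation with += does. A hedged micro-benchmark sketch of the difference:

func BenchmarkConcatPlusEquals(b *testing.B) {
	for i := 0; i < b.N; i++ {
		ret := ""
		for j := 0; j < 1024; j++ {
			ret += "x" // copies the whole string each iteration: O(n^2) overall
		}
		_ = ret
	}
}

func BenchmarkConcatBuilder(b *testing.B) {
	for i := 0; i < b.N; i++ {
		var sb strings.Builder
		for j := 0; j < 1024; j++ {
			sb.WriteString("x") // amortized append into a growing buffer
		}
		_ = sb.String()
	}
}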
@@ -10,7 +10,7 @@ info:
servers:
  - url: http://localhost:8065/plugins/playbooks/api/v0
paths:
  /runs:
  /plugins/playbooks/api/v0/runs:
    get:
      summary: List all playbook runs
      description: Retrieve a paged list of playbook runs, filtered by team, status, owner, name and/or members, and sorted by ID, name, status, creation date, end date, team or owner ID.
@@ -198,7 +198,7 @@ paths:
        500:
          $ref: "#/components/responses/500"

  /runs/dialog:
  /plugins/playbooks/api/v0/runs/dialog:
    post:
      summary: Create a new playbook run from dialog
      description: This is an internal endpoint to create a playbook run from the submission of an interactive dialog, filled by a user in the webapp. See [Interactive Dialogs](https://docs.mattermost.com/developer/interactive-dialogs.html) for more information.
@@ -276,7 +276,7 @@ paths:
        500:
          $ref: "#/components/responses/500"

  /runs/owners:
  /plugins/playbooks/api/v0/runs/owners:
    get:
      summary: Get all owners
      description: Get the owners of all playbook runs, filtered by team.
@@ -314,7 +314,7 @@ paths:
        500:
          $ref: "#/components/responses/500"

  /runs/channels:
  /plugins/playbooks/api/v0/runs/channels:
    get:
      summary: Get playbook run channels
      description: Get all channels associated with a playbook run, filtered by team, status, owner, name and/or members, and sorted by ID, name, status, creation date, end date, team, or owner ID.
@@ -413,7 +413,7 @@ paths:
        500:
          $ref: "#/components/responses/500"

  /runs/checklist-autocomplete:
  /plugins/playbooks/api/v0/runs/checklist-autocomplete:
    get:
      summary: Get autocomplete data for /playbook check
      description: This is an internal endpoint used by the autocomplete system to retrieve the data needed to show the list of items that the user can check.
@@ -459,7 +459,7 @@ paths:
        500:
          $ref: "#/components/responses/500"

  /runs/channel/{channel_id}:
  /plugins/playbooks/api/v0/runs/channel/{channel_id}:
    get:
      summary: Find playbook run by channel ID
      operationId: getPlaybookRunByChannelId
@@ -492,7 +492,7 @@ paths:
        500:
          $ref: "#/components/responses/500"

  /runs/{id}:
  /plugins/playbooks/api/v0/runs/{id}:
    get:
      summary: Get a playbook run
      operationId: getPlaybookRun
@@ -565,7 +565,7 @@ paths:
        500:
          $ref: "#/components/responses/500"

  /runs/{id}/metadata:
  /plugins/playbooks/api/v0/runs/{id}/metadata:
    get:
      summary: Get playbook run metadata
      operationId: getPlaybookRunMetadata
@@ -598,7 +598,7 @@ paths:
        500:
          $ref: "#/components/responses/500"

  /runs/{id}/end:
  /plugins/playbooks/api/v0/runs/{id}/end:
    put:
      summary: End a playbook run
      operationId: endPlaybookRun
@@ -651,7 +651,7 @@ paths:
        500:
          $ref: "#/components/responses/500"

  /runs/{id}/restart:
  /plugins/playbooks/api/v0/runs/{id}/restart:
    put:
      summary: Restart a playbook run
      operationId: restartPlaybookRun
@@ -678,7 +678,7 @@ paths:
        500:
          $ref: "#/components/responses/500"

  /runs/{id}/status:
  /plugins/playbooks/api/v0/runs/{id}/status:
    post:
      summary: Update a playbook run's status
      operationId: status
@@ -728,7 +728,7 @@ paths:
        500:
          $ref: "#/components/responses/500"

  /runs/{id}/finish:
  /plugins/playbooks/api/v0/runs/{id}/finish:
    put:
      summary: Finish a playbook
      operationId: finish
@@ -755,7 +755,7 @@ paths:
        500:
          $ref: "#/components/responses/500"

  /runs/{id}/owner:
  /plugins/playbooks/api/v0/runs/{id}/owner:
    post:
      summary: Update playbook run owner
      operationId: changeOwner
@@ -800,7 +800,7 @@ paths:
        500:
          $ref: "#/components/responses/500"

  /runs/{id}/next-stage-dialog:
  /plugins/playbooks/api/v0/runs/{id}/next-stage-dialog:
    post:
      summary: Go to next stage from dialog
      description: This is an internal endpoint to go to the next stage via a confirmation dialog, submitted by a user in the webapp.
@@ -835,7 +835,7 @@ paths:
        500:
          $ref: "#/components/responses/500"

  /runs/{id}/checklists/{checklist}/add:
  /plugins/playbooks/api/v0/runs/{id}/checklists/{checklist}/add:
    put:
      summary: Add an item to a playbook run's checklist
      description: The most common pattern to add a new item is to only send its title as the request payload. By default, it is an open item, with no assignee and no slash command.
@@ -923,7 +923,7 @@ paths:
              schema:
                $ref: "#/components/schemas/Error"

  /runs/{id}/checklists/{checklist}/reorder:
  /plugins/playbooks/api/v0/runs/{id}/checklists/{checklist}/reorder:
    put:
      summary: Reorder an item in a playbook run's checklist
      operationId: reoderChecklistItem
@@ -978,7 +978,7 @@ paths:
        500:
          $ref: "#/components/responses/500"

  /runs/{id}/checklists/{checklist}/item/{item}:
  /plugins/playbooks/api/v0/runs/{id}/checklists/{checklist}/item/{item}:
    put:
      summary: Update an item of a playbook run's checklist
      description: Update the title and the slash command of an item in one of the playbook run's checklists.
@@ -1083,7 +1083,7 @@ paths:
        500:
          $ref: "#/components/responses/500"

  /runs/{id}/checklists/{checklist}/item/{item}/state:
  /plugins/playbooks/api/v0/runs/{id}/checklists/{checklist}/item/{item}/state:
    put:
      summary: Update the state of an item
      operationId: itemSetState
@@ -1145,7 +1145,7 @@ paths:
        500:
          $ref: "#/components/responses/500"

  /runs/{id}/checklists/{checklist}/item/{item}/assignee:
  /plugins/playbooks/api/v0/runs/{id}/checklists/{checklist}/item/{item}/assignee:
    put:
      summary: Update the assignee of an item
      operationId: itemSetAssignee
@@ -1202,7 +1202,7 @@ paths:
        500:
          $ref: "#/components/responses/500"

  /runs/{id}/checklists/{checklist}/item/{item}/run:
  /plugins/playbooks/api/v0/runs/{id}/checklists/{checklist}/item/{item}/run:
    put:
      summary: Run an item's slash command
      operationId: itemRun
@@ -1249,7 +1249,7 @@ paths:
        500:
          $ref: "#/components/responses/500"

  /runs/{id}/timeline/{event_id}/:
  /plugins/playbooks/api/v0/runs/{id}/timeline/{event_id}/:
    delete:
      summary: Remove a timeline event from the playbook run
      operationId: removeTimelineEvent
@@ -1285,7 +1285,7 @@ paths:
        500:
          $ref: "#/components/responses/500"

  /playbooks:
  /plugins/playbooks/api/v0/playbooks:
    get:
      summary: List all playbooks
      description: Retrieve a paged list of playbooks, filtered by team, and sorted by title, number of stages or number of steps.
@@ -1562,7 +1562,7 @@ paths:
        500:
          $ref: "#/components/responses/500"

  /playbooks/{id}:
  /plugins/playbooks/api/v0/playbooks/{id}:
    get:
      summary: Get a playbook
      operationId: getPlaybook
@@ -1658,7 +1658,7 @@ paths:
        500:
          $ref: "#/components/responses/500"

  /playbooks/{id}/autofollows:
  /plugins/playbooks/api/v0/playbooks/{id}/autofollows:
    get:
      summary: Get the list of followers' user IDs of a playbook
      operationId: getAutoFollows
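With the plugin prefix folded into each path, a request against one of the renamed routes looks roughly like this (token and channel ID are placeholders):

curl -H "Authorization: Bearer <token>" \
  "http://localhost:8065/plugins/playbooks/api/v0/runs/channel/<channel_id>"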
@@ -1,7 +1,7 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

package main
package server

import (
	"context"

@@ -1,7 +1,7 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

package main
package server

import (
	"encoding/json"

@@ -1,7 +1,7 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

package main
package server

import (
	"net/http"

@@ -1,7 +1,7 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

package main
package server

import (
	"context"

@@ -1,7 +1,7 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

package main
package server

import (
	"context"

@@ -1,7 +1,7 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

package main
package server

import (
	"context"

@@ -1,7 +1,7 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

package main
package server

import (
	"context"

@@ -1,7 +1,7 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

package main
package server

import (
	"context"

@@ -1,7 +1,7 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

package main
package server

import (
	"context"

@@ -1,7 +1,7 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

package main
package server

import (
	"context"

@@ -1,7 +1,7 @@
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

package main
package server

import (
	"context"
1 server/scripts/esrupgrades/README.md Normal file
@@ -0,0 +1 @@
A collection of ad-hoc scripts to upgrade between ESRs.
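For context, these scripts are applied by piping them straight into the mysql client against the target database; a hedged invocation (host, credentials, and database name are placeholders):

mysql -h <host> -u <user> -p <database> < esr.5.37-6.3.mysql.up.sql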
160 server/scripts/esrupgrades/esr.5.37-6.3.mysql.cleanup.sql Normal file
@@ -0,0 +1,160 @@
/* Product notices are controlled externally, via the mattermost/notices repository.
   When there is a new notice specified there, the server may have time, right after
   the migration and before it is shut down, to download it and modify the
   ProductNoticeViewState table, adding a row for all users that have not seen it or
   removing old notices that no longer need to be shown. This can happen in the
   UpdateProductNotices function that is executed periodically to update the notices
   cache. The script will never do this, so we need to remove all rows in that table
   to avoid any unwanted diff. */
DELETE FROM ProductNoticeViewState;

/* The script does not update the Systems row that tracks the version, so it is manually updated
   here so that it does not show in the diff. */
UPDATE Systems SET Value = '6.3.0' WHERE Name = 'Version';

/* The script does not update the schema_migrations table, which is automatically used by the
   migrate library to track the version, so we drop it altogether to avoid spurious errors in
   the diff */
DROP TABLE IF EXISTS schema_migrations;

/* Migration 000054_create_crt_channelmembership_count.up sets
   ChannelMembers.LastUpdateAt to the results of SELECT ROUND(UNIX_TIMESTAMP(NOW(3))*1000),
   which will be different each time the migration is run. Thus, the column will always be
   different when comparing the server and script migrations. To bypass this, we update all
   rows in ChannelMembers so that they contain the same value for that column. */
UPDATE ChannelMembers SET LastUpdateAt = 1;

/* Migration 000055_create_crt_thread_count_and_unreads.up sets
   ThreadMemberships.LastUpdated to the results of SELECT ROUND(UNIX_TIMESTAMP(NOW(3))*1000),
   which will be different each time the migration is run. Thus, the column will always be
   different when comparing the server and script migrations. To bypass this, we update all
   rows in ThreadMemberships so that they contain the same value for that column. */
UPDATE ThreadMemberships SET LastUpdated = 1;

/* The security update check in the server may update the LastSecurityTime system value. To
   avoid any spurious difference in the migrations, we update it to a fixed value. */
UPDATE Systems SET Value = 1 WHERE Name = 'LastSecurityTime';

/* The server migration contains an in-app migration that adds new roles for Playbooks:
   doPlaybooksRolesCreationMigration, defined in https://github.com/mattermost/mattermost-server/blob/282bd351e3767dcfd8c8340da2e0915197c0dbcb/app/migrations.go#L345-L469
   The roles are the ones defined in https://github.com/mattermost/mattermost-server/blob/282bd351e3767dcfd8c8340da2e0915197c0dbcb/model/role.go#L874-L929
   When this migration finishes, it also adds a new row to the Systems table with the key of the migration.
   This in-app migration does not happen in the script, so we remove those rows here. */
DELETE FROM Roles WHERE Name = 'playbook_member';
DELETE FROM Roles WHERE Name = 'playbook_admin';
DELETE FROM Roles WHERE Name = 'run_member';
DELETE FROM Roles WHERE Name = 'run_admin';
DELETE FROM Systems WHERE Name = 'PlaybookRolesCreationMigrationComplete';

/* The server migration contains an in-app migration that adds playbooks permissions to certain roles:
   getAddPlaybooksPermissions, defined in https://github.com/mattermost/mattermost-server/blob/f9b996934cabf9a8fad5901835e7e9b418917402/app/permissions_migrations.go#L918-L951
   The specific roles ('%playbook%') are removed in the procedure below, but the migrations also add a new row to the Systems table marking the migration as complete.
   This in-app migration does not happen in the script, so we remove that row here. */
DELETE FROM Systems WHERE Name = 'playbooks_permissions';

/* The rest of this script defines and executes a procedure to update the Roles table. It performs several changes:
   1. Set the UpdateAt column of all rows to a fixed value, so that the server migration changes to this column
      do not appear in the diff.
   2. Remove the set of specific permissions added in the server migration that is not covered by the script, as
      this logic happens all in-app after the normal DB migrations.
   3. Set a consistent order in the Permissions column, which is modelled as a space-separated string containing each of
      the different permissions each role has. This change is the reason why we need a complex procedure, which creates
      a temporary table that pairs each single permission to its corresponding ID. So if the Roles table contains two
      rows like:
          Id: 'abcd'
          Permissions: 'view_team read_public_channel invite_user'
          Id: 'efgh'
          Permissions: 'view_team create_emojis'
      then the new temporary table will contain five rows like:
          Id: 'abcd'
          Permissions: 'view_team'
          Id: 'abcd'
          Permissions: 'read_public_channel'
          Id: 'abcd'
          Permissions: 'invite_user'
          Id: 'efgh'
          Permissions: 'view_team'
          Id: 'efgh'
          Permissions: 'create_emojis'
*/

DROP PROCEDURE IF EXISTS splitPermissions;
DROP PROCEDURE IF EXISTS sortAndFilterPermissionsInRoles;

DROP TEMPORARY TABLE IF EXISTS temp_roles;
CREATE TEMPORARY TABLE temp_roles(id varchar(26), permission longtext);

DELIMITER //

/* Auxiliary procedure that splits the space-separated permissions string into single rows that are inserted
   in the temporary temp_roles table along with their corresponding ID. */
CREATE PROCEDURE splitPermissions(
    IN id varchar(26),
    IN permissionsString longtext
)
BEGIN
    DECLARE idx INT DEFAULT 0;
    SELECT TRIM(permissionsString) INTO permissionsString;
    SELECT LOCATE(' ', permissionsString) INTO idx;
    WHILE idx > 0 DO
        INSERT INTO temp_roles SELECT id, TRIM(LEFT(permissionsString, idx));
        SELECT SUBSTR(permissionsString, idx+1) INTO permissionsString;
        SELECT LOCATE(' ', permissionsString) INTO idx;
    END WHILE;
    INSERT INTO temp_roles(id, permission) VALUES(id, TRIM(permissionsString));
END; //

/* Main procedure that does update the Roles table */
CREATE PROCEDURE sortAndFilterPermissionsInRoles()
BEGIN
    DECLARE done INT DEFAULT FALSE;
    DECLARE rolesId varchar(26) DEFAULT '';
    DECLARE rolesPermissions longtext DEFAULT '';
    DECLARE cur1 CURSOR FOR SELECT Id, Permissions FROM Roles;
    DECLARE CONTINUE HANDLER FOR NOT FOUND SET done = TRUE;

    /* 1. Set a fixed value in the UpdateAt column for all rows in Roles table */
    UPDATE Roles SET UpdateAt = 1;

    /* Call splitPermissions for every row in the Roles table, thus populating the
       temp_roles table. */
    OPEN cur1;
    read_loop: LOOP
        FETCH cur1 INTO rolesId, rolesPermissions;
        IF done THEN
            LEAVE read_loop;
        END IF;
        CALL splitPermissions(rolesId, rolesPermissions);
    END LOOP;
    CLOSE cur1;

    /* 2. Filter out the new permissions added by the in-app migrations */
    DELETE FROM temp_roles WHERE permission LIKE '%playbook%';
    DELETE FROM temp_roles WHERE permission LIKE 'run_create';
    DELETE FROM temp_roles WHERE permission LIKE 'run_manage_members';
    DELETE FROM temp_roles WHERE permission LIKE 'run_manage_properties';
    DELETE FROM temp_roles WHERE permission LIKE 'run_view';

    /* Temporarily set to the maximum permitted value, since the call to group_concat
       below needs a value bigger than the default */
    SET group_concat_max_len = 18446744073709551615;

    /* 3. Update the Permissions column in the Roles table with the filtered, sorted permissions,
       concatenated again as a space-separated string */
    UPDATE
        Roles INNER JOIN (
            SELECT temp_roles.id as Id, TRIM(group_concat(temp_roles.permission ORDER BY temp_roles.permission SEPARATOR ' ')) as Permissions
            FROM Roles JOIN temp_roles ON Roles.Id = temp_roles.id
            GROUP BY temp_roles.id
        ) AS Sorted
        ON Roles.Id = Sorted.Id
    SET Roles.Permissions = Sorted.Permissions;

    /* Reset group_concat_max_len to its default value */
    SET group_concat_max_len = 1024;
END; //
DELIMITER ;

CALL sortAndFilterPermissionsInRoles();

DROP TEMPORARY TABLE IF EXISTS temp_roles;
695 server/scripts/esrupgrades/esr.5.37-6.3.mysql.up.sql Normal file
@@ -0,0 +1,695 @@
/* ==> mysql/000054_create_crt_channelmembership_count.up.sql <== */
/* fixCRTChannelMembershipCounts fixes the channel counts, i.e. the total message count,
   total root message count, mention count, and mention count in root messages for users
   who have viewed the channel after the last post in the channel */

DELIMITER //
CREATE PROCEDURE MigrateCRTChannelMembershipCounts ()
BEGIN
    IF(
        SELECT
            EXISTS (
                SELECT
                    * FROM Systems
                WHERE
                    Name = 'CRTChannelMembershipCountsMigrationComplete') = 0) THEN
        UPDATE
            ChannelMembers
            INNER JOIN Channels ON Channels.Id = ChannelMembers.ChannelId SET
            MentionCount = 0, MentionCountRoot = 0, MsgCount = Channels.TotalMsgCount, MsgCountRoot = Channels.TotalMsgCountRoot, LastUpdateAt = (
                SELECT
                    (SELECT ROUND(UNIX_TIMESTAMP(NOW(3))*1000)))
        WHERE
            ChannelMembers.LastViewedAt >= Channels.LastPostAt;
        INSERT INTO Systems
            VALUES('CRTChannelMembershipCountsMigrationComplete', 'true');
    END IF;
END//
DELIMITER ;
CALL MigrateCRTChannelMembershipCounts ();
DROP PROCEDURE IF EXISTS MigrateCRTChannelMembershipCounts;

/* ==> mysql/000055_create_crt_thread_count_and_unreads.up.sql <== */
/* fixCRTThreadCountsAndUnreads marks threads as read for users where the last
   reply time of the thread is earlier than the time the user viewed the channel.
   Marking a thread means setting the mention count to zero and setting the
   last viewed at time of the thread as the last viewed at time
   of the channel */

DELIMITER //
CREATE PROCEDURE MigrateCRTThreadCountsAndUnreads ()
BEGIN
    IF(SELECT EXISTS(SELECT * FROM Systems WHERE Name = 'CRTThreadCountsAndUnreadsMigrationComplete') = 0) THEN
        UPDATE
            ThreadMemberships
            INNER JOIN (
                SELECT
                    PostId,
                    UserId,
                    ChannelMembers.LastViewedAt AS CM_LastViewedAt,
                    Threads.LastReplyAt
                FROM
                    Threads
                    INNER JOIN ChannelMembers ON ChannelMembers.ChannelId = Threads.ChannelId
                WHERE
                    Threads.LastReplyAt <= ChannelMembers.LastViewedAt) AS q ON ThreadMemberships.Postid = q.PostId
            AND ThreadMemberships.UserId = q.UserId SET LastViewed = q.CM_LastViewedAt + 1, UnreadMentions = 0, LastUpdated = (
                SELECT
                    (SELECT ROUND(UNIX_TIMESTAMP(NOW(3))*1000)));
        INSERT INTO Systems
            VALUES('CRTThreadCountsAndUnreadsMigrationComplete', 'true');
    END IF;
END//
DELIMITER ;
CALL MigrateCRTThreadCountsAndUnreads ();
DROP PROCEDURE IF EXISTS MigrateCRTThreadCountsAndUnreads;

/* ==> mysql/000056_upgrade_channels_v6.0.up.sql <== */
SET @preparedStatement = (SELECT IF(
    (
        SELECT COUNT(*) FROM INFORMATION_SCHEMA.STATISTICS
        WHERE table_name = 'Channels'
        AND table_schema = DATABASE()
        AND index_name = 'idx_channels_team_id_display_name'
    ) > 0,
    'SELECT 1',
    'CREATE INDEX idx_channels_team_id_display_name ON Channels(TeamId, DisplayName);'
));

PREPARE createIndexIfNotExists FROM @preparedStatement;
EXECUTE createIndexIfNotExists;
DEALLOCATE PREPARE createIndexIfNotExists;

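/* MySQL has no CREATE INDEX IF NOT EXISTS, so each of these PREPARE/EXECUTE blocks selects
   either the real DDL statement or a no-op 'SELECT 1' into @preparedStatement and then
   executes it; the same guard pattern repeats for each index and column change below. */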
SET @preparedStatement = (SELECT IF(
    (
        SELECT COUNT(*) FROM INFORMATION_SCHEMA.STATISTICS
        WHERE table_name = 'Channels'
        AND table_schema = DATABASE()
        AND index_name = 'idx_channels_team_id_type'
    ) > 0,
    'SELECT 1',
    'CREATE INDEX idx_channels_team_id_type ON Channels(TeamId, Type);'
));

PREPARE createIndexIfNotExists FROM @preparedStatement;
EXECUTE createIndexIfNotExists;
DEALLOCATE PREPARE createIndexIfNotExists;

SET @preparedStatement = (SELECT IF(
    (
        SELECT COUNT(*) FROM INFORMATION_SCHEMA.STATISTICS
        WHERE table_name = 'Channels'
        AND table_schema = DATABASE()
        AND index_name = 'idx_channels_team_id'
    ) > 0,
    'DROP INDEX idx_channels_team_id ON Channels;',
    'SELECT 1'
));

PREPARE removeIndexIfExists FROM @preparedStatement;
EXECUTE removeIndexIfExists;
DEALLOCATE PREPARE removeIndexIfExists;

/* ==> mysql/000057_upgrade_command_webhooks_v6.0.up.sql <== */

DELIMITER //
CREATE PROCEDURE MigrateRootId_CommandWebhooks () BEGIN DECLARE ParentId_EXIST INT;
SELECT COUNT(*)
FROM INFORMATION_SCHEMA.COLUMNS
WHERE TABLE_NAME = 'CommandWebhooks'
AND table_schema = DATABASE()
AND COLUMN_NAME = 'ParentId' INTO ParentId_EXIST;
IF(ParentId_EXIST > 0) THEN
    UPDATE CommandWebhooks SET RootId = ParentId WHERE RootId = '' AND RootId != ParentId;
END IF;
END//
DELIMITER ;
CALL MigrateRootId_CommandWebhooks ();
DROP PROCEDURE IF EXISTS MigrateRootId_CommandWebhooks;

SET @preparedStatement = (SELECT IF(
    (
        SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS
        WHERE table_name = 'CommandWebhooks'
        AND table_schema = DATABASE()
        AND column_name = 'ParentId'
    ) > 0,
    'ALTER TABLE CommandWebhooks DROP COLUMN ParentId;',
    'SELECT 1'
));

PREPARE alterIfExists FROM @preparedStatement;
EXECUTE alterIfExists;
DEALLOCATE PREPARE alterIfExists;

/* ==> mysql/000058_upgrade_channelmembers_v6.0.up.sql <== */
SET @preparedStatement = (SELECT IF(
    (
        SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS
        WHERE table_name = 'ChannelMembers'
        AND table_schema = DATABASE()
        AND column_name = 'NotifyProps'
        AND column_type != 'JSON'
    ) > 0,
    'ALTER TABLE ChannelMembers MODIFY COLUMN NotifyProps JSON;',
    'SELECT 1'
));

PREPARE alterIfExists FROM @preparedStatement;
EXECUTE alterIfExists;
DEALLOCATE PREPARE alterIfExists;

SET @preparedStatement = (SELECT IF(
    (
        SELECT COUNT(*) FROM INFORMATION_SCHEMA.STATISTICS
        WHERE table_name = 'ChannelMembers'
        AND table_schema = DATABASE()
        AND index_name = 'idx_channelmembers_user_id'
    ) > 0,
    'DROP INDEX idx_channelmembers_user_id ON ChannelMembers;',
    'SELECT 1'
));

PREPARE removeIndexIfExists FROM @preparedStatement;
EXECUTE removeIndexIfExists;
DEALLOCATE PREPARE removeIndexIfExists;

SET @preparedStatement = (SELECT IF(
    (
        SELECT COUNT(*) FROM INFORMATION_SCHEMA.STATISTICS
        WHERE table_name = 'ChannelMembers'
        AND table_schema = DATABASE()
        AND index_name = 'idx_channelmembers_user_id_channel_id_last_viewed_at'
    ) > 0,
    'SELECT 1',
    'CREATE INDEX idx_channelmembers_user_id_channel_id_last_viewed_at ON ChannelMembers(UserId, ChannelId, LastViewedAt);'
));

PREPARE createIndexIfNotExists FROM @preparedStatement;
EXECUTE createIndexIfNotExists;
DEALLOCATE PREPARE createIndexIfNotExists;

SET @preparedStatement = (SELECT IF(
    (
        SELECT COUNT(*) FROM INFORMATION_SCHEMA.STATISTICS
        WHERE table_name = 'ChannelMembers'
        AND table_schema = DATABASE()
        AND index_name = 'idx_channelmembers_channel_id_scheme_guest_user_id'
    ) > 0,
    'SELECT 1',
    'CREATE INDEX idx_channelmembers_channel_id_scheme_guest_user_id ON ChannelMembers(ChannelId, SchemeGuest, UserId);'
));

PREPARE createIndexIfNotExists FROM @preparedStatement;
EXECUTE createIndexIfNotExists;
DEALLOCATE PREPARE createIndexIfNotExists;

/* ==> mysql/000059_upgrade_users_v6.0.up.sql <== */
SET @preparedStatement = (SELECT IF(
    (
        SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS
        WHERE table_name = 'Users'
        AND table_schema = DATABASE()
        AND column_name = 'Props'
        AND column_type != 'JSON'
    ) > 0,
    'ALTER TABLE Users MODIFY COLUMN Props JSON;',
    'SELECT 1'
));

PREPARE alterIfExists FROM @preparedStatement;
EXECUTE alterIfExists;
DEALLOCATE PREPARE alterIfExists;

SET @preparedStatement = (SELECT IF(
    (
        SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS
        WHERE table_name = 'Users'
        AND table_schema = DATABASE()
        AND column_name = 'NotifyProps'
        AND column_type != 'JSON'
    ) > 0,
    'ALTER TABLE Users MODIFY COLUMN NotifyProps JSON;',
    'SELECT 1'
));

PREPARE alterIfExists FROM @preparedStatement;
EXECUTE alterIfExists;
DEALLOCATE PREPARE alterIfExists;

SET @preparedStatement = (SELECT IF(
    (
        SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS
        WHERE table_name = 'Users'
        AND table_schema = DATABASE()
        AND column_name = 'Timezone'
        AND column_default IS NOT NULL
    ) > 0,
    'ALTER TABLE Users ALTER Timezone DROP DEFAULT;',
    'SELECT 1'
));

PREPARE alterIfExists FROM @preparedStatement;
EXECUTE alterIfExists;
DEALLOCATE PREPARE alterIfExists;

SET @preparedStatement = (SELECT IF(
    (
        SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS
        WHERE table_name = 'Users'
        AND table_schema = DATABASE()
        AND column_name = 'Timezone'
        AND column_type != 'JSON'
    ) > 0,
    'ALTER TABLE Users MODIFY COLUMN Timezone JSON;',
    'SELECT 1'
));

PREPARE alterIfExists FROM @preparedStatement;
EXECUTE alterIfExists;
DEALLOCATE PREPARE alterIfExists;

SET @preparedStatement = (SELECT IF(
    (
        SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS
        WHERE table_name = 'Users'
        AND table_schema = DATABASE()
        AND column_name = 'Roles'
        AND column_type != 'text'
    ) > 0,
    'ALTER TABLE Users MODIFY COLUMN Roles text;',
    'SELECT 1'
));

PREPARE alterIfExists FROM @preparedStatement;
EXECUTE alterIfExists;
DEALLOCATE PREPARE alterIfExists;

/* ==> mysql/000060_upgrade_jobs_v6.0.up.sql <== */
SET @preparedStatement = (SELECT IF(
    (
        SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS
        WHERE table_name = 'Jobs'
        AND table_schema = DATABASE()
        AND column_name = 'Data'
        AND column_type != 'JSON'
    ) > 0,
    'ALTER TABLE Jobs MODIFY COLUMN Data JSON;',
    'SELECT 1'
));

PREPARE alterIfExists FROM @preparedStatement;
EXECUTE alterIfExists;
DEALLOCATE PREPARE alterIfExists;


/* ==> mysql/000061_upgrade_link_metadata_v6.0.up.sql <== */

SET @preparedStatement = (SELECT IF(
    (
        SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS
        WHERE table_name = 'LinkMetadata'
        AND table_schema = DATABASE()
        AND column_name = 'Data'
        AND column_type != 'JSON'
    ) > 0,
    'ALTER TABLE LinkMetadata MODIFY COLUMN Data JSON;',
    'SELECT 1'
));

PREPARE alterIfExists FROM @preparedStatement;
EXECUTE alterIfExists;
DEALLOCATE PREPARE alterIfExists;

||||
/* ==> mysql/000062_upgrade_sessions_v6.0.up.sql <== */
|
||||
SET @preparedStatement = (SELECT IF(
|
||||
(
|
||||
SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS
|
||||
WHERE table_name = 'Sessions'
|
||||
AND table_schema = DATABASE()
|
||||
AND column_name = 'Props'
|
||||
AND column_type != 'JSON'
|
||||
) > 0,
|
||||
'ALTER TABLE Sessions MODIFY COLUMN Props JSON;',
|
||||
'SELECT 1'
|
||||
));
|
||||
|
||||
PREPARE alterIfExists FROM @preparedStatement;
|
||||
EXECUTE alterIfExists;
|
||||
DEALLOCATE PREPARE alterIfExists;
|
||||
|
||||
|
||||
/* ==> mysql/000063_upgrade_threads_v6.0.up.sql <== */
SET @preparedStatement = (SELECT IF(
    (
        SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS
        WHERE table_name = 'Threads'
        AND table_schema = DATABASE()
        AND column_name = 'Participants'
        AND column_type != 'JSON'
    ) > 0,
    'ALTER TABLE Threads MODIFY COLUMN Participants JSON;',
    'SELECT 1'
));

PREPARE alterIfExists FROM @preparedStatement;
EXECUTE alterIfExists;
DEALLOCATE PREPARE alterIfExists;

SET @preparedStatement = (SELECT IF(
    (
        SELECT COUNT(*) FROM INFORMATION_SCHEMA.STATISTICS
        WHERE table_name = 'Threads'
        AND table_schema = DATABASE()
        AND index_name = 'idx_threads_channel_id_last_reply_at'
    ) > 0,
    'SELECT 1',
    'CREATE INDEX idx_threads_channel_id_last_reply_at ON Threads(ChannelId, LastReplyAt);'
));

PREPARE createIndexIfNotExists FROM @preparedStatement;
EXECUTE createIndexIfNotExists;
DEALLOCATE PREPARE createIndexIfNotExists;

SET @preparedStatement = (SELECT IF(
    (
        SELECT COUNT(*) FROM INFORMATION_SCHEMA.STATISTICS
        WHERE table_name = 'Threads'
        AND table_schema = DATABASE()
        AND index_name = 'idx_threads_channel_id'
    ) > 0,
    'DROP INDEX idx_threads_channel_id ON Threads;',
    'SELECT 1'
));

PREPARE removeIndexIfExists FROM @preparedStatement;
EXECUTE removeIndexIfExists;
DEALLOCATE PREPARE removeIndexIfExists;
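/* Note: idx_threads_channel_id is dropped only after the composite index
   above exists; since the new index leads with ChannelId, MySQL's
   leftmost-prefix matching lets it serve the lookups the single-column
   index used to cover. The Status and Posts index swaps below follow the
   same reasoning. */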
/* ==> mysql/000064_upgrade_status_v6.0.up.sql <== */
SET @preparedStatement = (SELECT IF(
    (
        SELECT COUNT(*) FROM INFORMATION_SCHEMA.STATISTICS
        WHERE table_name = 'Status'
        AND table_schema = DATABASE()
        AND index_name = 'idx_status_status_dndendtime'
    ) > 0,
    'SELECT 1',
    'CREATE INDEX idx_status_status_dndendtime ON Status(Status, DNDEndTime);'
));

PREPARE createIndexIfNotExists FROM @preparedStatement;
EXECUTE createIndexIfNotExists;
DEALLOCATE PREPARE createIndexIfNotExists;

SET @preparedStatement = (SELECT IF(
    (
        SELECT COUNT(*) FROM INFORMATION_SCHEMA.STATISTICS
        WHERE table_name = 'Status'
        AND table_schema = DATABASE()
        AND index_name = 'idx_status_status'
    ) > 0,
    'DROP INDEX idx_status_status ON Status;',
    'SELECT 1'
));

PREPARE removeIndexIfExists FROM @preparedStatement;
EXECUTE removeIndexIfExists;
DEALLOCATE PREPARE removeIndexIfExists;
/* ==> mysql/000065_upgrade_groupchannels_v6.0.up.sql <== */
SET @preparedStatement = (SELECT IF(
    (
        SELECT COUNT(*) FROM INFORMATION_SCHEMA.STATISTICS
        WHERE table_name = 'GroupChannels'
        AND table_schema = DATABASE()
        AND index_name = 'idx_groupchannels_schemeadmin'
    ) > 0,
    'SELECT 1',
    'CREATE INDEX idx_groupchannels_schemeadmin ON GroupChannels(SchemeAdmin);'
));

PREPARE createIndexIfNotExists FROM @preparedStatement;
EXECUTE createIndexIfNotExists;
DEALLOCATE PREPARE createIndexIfNotExists;
/* ==> mysql/000066_upgrade_posts_v6.0.up.sql <== */
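/* This migration needs multi-step conditional logic: if the legacy ParentId
   column still exists, RootId is backfilled from it and ParentId is dropped
   in the same ALTER that converts FileIds and Props. The IF()/PREPARE idiom
   cannot express that, so the work is wrapped in a throwaway stored
   procedure: created, called once, dropped. */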
DELIMITER //
CREATE PROCEDURE MigrateRootId_Posts ()
BEGIN
    DECLARE ParentId_EXIST INT;
    DECLARE Alter_FileIds INT;
    DECLARE Alter_Props INT;
    SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS
    WHERE TABLE_NAME = 'Posts'
    AND table_schema = DATABASE()
    AND COLUMN_NAME = 'ParentId' INTO ParentId_EXIST;
    SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS
    WHERE table_name = 'Posts'
    AND table_schema = DATABASE()
    AND column_name = 'FileIds'
    AND column_type != 'text' INTO Alter_FileIds;
    SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS
    WHERE table_name = 'Posts'
    AND table_schema = DATABASE()
    AND column_name = 'Props'
    AND column_type != 'JSON' INTO Alter_Props;
    IF (Alter_Props OR Alter_FileIds) THEN
        IF(ParentId_EXIST > 0) THEN
            UPDATE Posts SET RootId = ParentId WHERE RootId = '' AND RootId != ParentId;
            ALTER TABLE Posts MODIFY COLUMN FileIds text, MODIFY COLUMN Props JSON, DROP COLUMN ParentId;
        ELSE
            ALTER TABLE Posts MODIFY COLUMN FileIds text, MODIFY COLUMN Props JSON;
        END IF;
    END IF;
END//
DELIMITER ;
CALL MigrateRootId_Posts ();
DROP PROCEDURE IF EXISTS MigrateRootId_Posts;
SET @preparedStatement = (SELECT IF(
    (
        SELECT COUNT(*) FROM INFORMATION_SCHEMA.STATISTICS
        WHERE table_name = 'Posts'
        AND table_schema = DATABASE()
        AND index_name = 'idx_posts_root_id_delete_at'
    ) > 0,
    'SELECT 1',
    'CREATE INDEX idx_posts_root_id_delete_at ON Posts(RootId, DeleteAt);'
));

PREPARE createIndexIfNotExists FROM @preparedStatement;
EXECUTE createIndexIfNotExists;
DEALLOCATE PREPARE createIndexIfNotExists;

SET @preparedStatement = (SELECT IF(
    (
        SELECT COUNT(*) FROM INFORMATION_SCHEMA.STATISTICS
        WHERE table_name = 'Posts'
        AND table_schema = DATABASE()
        AND index_name = 'idx_posts_root_id'
    ) > 0,
    'DROP INDEX idx_posts_root_id ON Posts;',
    'SELECT 1'
));

PREPARE removeIndexIfExists FROM @preparedStatement;
EXECUTE removeIndexIfExists;
DEALLOCATE PREPARE removeIndexIfExists;
/* ==> mysql/000067_upgrade_channelmembers_v6.1.up.sql <== */
SET @preparedStatement = (SELECT IF(
    (
        SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS
        WHERE table_name = 'ChannelMembers'
        AND table_schema = DATABASE()
        AND column_name = 'Roles'
        AND column_type != 'text'
    ) > 0,
    'ALTER TABLE ChannelMembers MODIFY COLUMN Roles text;',
    'SELECT 1'
));

PREPARE alterIfExists FROM @preparedStatement;
EXECUTE alterIfExists;
DEALLOCATE PREPARE alterIfExists;
/* ==> mysql/000068_upgrade_teammembers_v6.1.up.sql <== */
SET @preparedStatement = (SELECT IF(
    (
        SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS
        WHERE table_name = 'TeamMembers'
        AND table_schema = DATABASE()
        AND column_name = 'Roles'
        AND column_type != 'text'
    ) > 0,
    'ALTER TABLE TeamMembers MODIFY COLUMN Roles text;',
    'SELECT 1'
));

PREPARE alterIfExists FROM @preparedStatement;
EXECUTE alterIfExists;
DEALLOCATE PREPARE alterIfExists;
/* ==> mysql/000069_upgrade_jobs_v6.1.up.sql <== */
SET @preparedStatement = (SELECT IF(
    (
        SELECT COUNT(*) FROM INFORMATION_SCHEMA.STATISTICS
        WHERE table_name = 'Jobs'
        AND table_schema = DATABASE()
        AND index_name = 'idx_jobs_status_type'
    ) > 0,
    'SELECT 1',
    'CREATE INDEX idx_jobs_status_type ON Jobs(Status, Type);'
));

PREPARE createIndexIfNotExists FROM @preparedStatement;
EXECUTE createIndexIfNotExists;
DEALLOCATE PREPARE createIndexIfNotExists;
/* ==> mysql/000070_upgrade_cte_v6.1.up.sql <== */
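/* Adds Channels.LastRootPostAt and backfills it with the latest root-post
   CreateAt (Posts.RootId = '') per channel. FORCE INDEX pins the join to
   idx_posts_channel_id_update_at rather than leaving the plan to the
   optimizer, since the backfill joins Channels against the full Posts
   table. */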
DELIMITER //
CREATE PROCEDURE Migrate_LastRootPostAt ()
BEGIN
    DECLARE LastRootPostAt_EXIST INT;
    SELECT COUNT(*)
    FROM INFORMATION_SCHEMA.COLUMNS
    WHERE TABLE_NAME = 'Channels'
    AND table_schema = DATABASE()
    AND COLUMN_NAME = 'LastRootPostAt' INTO LastRootPostAt_EXIST;
    IF(LastRootPostAt_EXIST = 0) THEN
        ALTER TABLE Channels ADD COLUMN LastRootPostAt bigint DEFAULT 0;
        UPDATE Channels
        INNER JOIN (
            SELECT
                Channels.Id channelid,
                COALESCE(MAX(Posts.CreateAt), 0) AS lastrootpost
            FROM Channels
            LEFT JOIN Posts FORCE INDEX (idx_posts_channel_id_update_at)
                ON Channels.Id = Posts.ChannelId
            WHERE Posts.RootId = ''
            GROUP BY Channels.Id
        ) AS q ON q.channelid = Channels.Id
        SET LastRootPostAt = lastrootpost;
    END IF;
END//
DELIMITER ;
CALL Migrate_LastRootPostAt ();
DROP PROCEDURE IF EXISTS Migrate_LastRootPostAt;
/* ==> mysql/000071_upgrade_sessions_v6.1.up.sql <== */
SET @preparedStatement = (SELECT IF(
    (
        SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS
        WHERE table_name = 'Sessions'
        AND table_schema = DATABASE()
        AND column_name = 'Roles'
        AND column_type != 'text'
    ) > 0,
    'ALTER TABLE Sessions MODIFY COLUMN Roles text;',
    'SELECT 1'
));

PREPARE alterIfExists FROM @preparedStatement;
EXECUTE alterIfExists;
DEALLOCATE PREPARE alterIfExists;
/* ==> mysql/000072_upgrade_schemes_v6.3.up.sql <== */
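/* The four statements below use the inverse guard (alterIfNotExists): the
   column is added only when INFORMATION_SCHEMA shows it missing, bringing in
   the new playbook and run scheme roles. */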
SET @preparedStatement = (SELECT IF(
    (
        SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS
        WHERE table_name = 'Schemes'
        AND table_schema = DATABASE()
        AND column_name = 'DefaultPlaybookAdminRole'
    ) > 0,
    'SELECT 1',
    'ALTER TABLE Schemes ADD COLUMN DefaultPlaybookAdminRole VARCHAR(64) DEFAULT "";'
));

PREPARE alterIfNotExists FROM @preparedStatement;
EXECUTE alterIfNotExists;
DEALLOCATE PREPARE alterIfNotExists;

SET @preparedStatement = (SELECT IF(
    (
        SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS
        WHERE table_name = 'Schemes'
        AND table_schema = DATABASE()
        AND column_name = 'DefaultPlaybookMemberRole'
    ) > 0,
    'SELECT 1',
    'ALTER TABLE Schemes ADD COLUMN DefaultPlaybookMemberRole VARCHAR(64) DEFAULT "";'
));

PREPARE alterIfNotExists FROM @preparedStatement;
EXECUTE alterIfNotExists;
DEALLOCATE PREPARE alterIfNotExists;

SET @preparedStatement = (SELECT IF(
    (
        SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS
        WHERE table_name = 'Schemes'
        AND table_schema = DATABASE()
        AND column_name = 'DefaultRunAdminRole'
    ) > 0,
    'SELECT 1',
    'ALTER TABLE Schemes ADD COLUMN DefaultRunAdminRole VARCHAR(64) DEFAULT "";'
));

PREPARE alterIfNotExists FROM @preparedStatement;
EXECUTE alterIfNotExists;
DEALLOCATE PREPARE alterIfNotExists;

SET @preparedStatement = (SELECT IF(
    (
        SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS
        WHERE table_name = 'Schemes'
        AND table_schema = DATABASE()
        AND column_name = 'DefaultRunMemberRole'
    ) > 0,
    'SELECT 1',
    'ALTER TABLE Schemes ADD COLUMN DefaultRunMemberRole VARCHAR(64) DEFAULT "";'
));

PREPARE alterIfNotExists FROM @preparedStatement;
EXECUTE alterIfNotExists;
DEALLOCATE PREPARE alterIfNotExists;
/* ==> mysql/000073_upgrade_plugin_key_value_store_v6.3.up.sql <== */
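/* Widens PluginKeyValueStore.PKey to varchar(150). The guard compares
   column_type, so the ALTER runs only when the column is not already at the
   new width. */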
SET @preparedStatement = (SELECT IF(
    (
        SELECT Count(*) FROM Information_Schema.Columns
        WHERE table_name = 'PluginKeyValueStore'
        AND table_schema = DATABASE()
        AND column_name = 'PKey'
        AND column_type != 'varchar(150)'
    ) > 0,
    'ALTER TABLE PluginKeyValueStore MODIFY COLUMN PKey varchar(150);',
    'SELECT 1'
));

PREPARE alterTypeIfExists FROM @preparedStatement;
EXECUTE alterTypeIfExists;
DEALLOCATE PREPARE alterTypeIfExists;
/* ==> mysql/000074_upgrade_users_v6.3.up.sql <== */
SET @preparedStatement = (SELECT IF(
    (
        SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS
        WHERE table_name = 'Users'
        AND table_schema = DATABASE()
        AND column_name = 'AcceptedTermsOfServiceId'
    ) > 0,
    'ALTER TABLE Users DROP COLUMN AcceptedTermsOfServiceId;',
    'SELECT 1'
));

PREPARE alterIfExists FROM @preparedStatement;
EXECUTE alterIfExists;
DEALLOCATE PREPARE alterIfExists;
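/* Optional spot-check, not part of the upstream migration: after the script
   completes, this illustrative query lists the column types touched by the
   conversions above. */
SELECT table_name, column_name, column_type
FROM INFORMATION_SCHEMA.COLUMNS
WHERE table_schema = DATABASE()
AND column_name IN ('Timezone', 'Roles', 'Props', 'Data', 'Participants', 'FileIds');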
Some files were not shown because too many files have changed in this diff.